prompt (string, length 19–1.03M) | completion (string, length 4–2.12k) | api (string, length 8–90)
---|---|---
import os
import glob2
import numpy as np
import pandas as pd
import tensorflow as tf
from skimage.io import imread
# e.g. /datasets/faces_emore_112x112_folders/*/*.jpg
default_image_names_reg = "*/*.jpg"
default_image_classes_rule = lambda path: int(os.path.basename(os.path.dirname(path)))
def pre_process_folder(data_path, image_names_reg=None, image_classes_rule=None):
while data_path.endswith("/"):
data_path = data_path[:-1]
if not data_path.endswith(".npz"):
dest_pickle = os.path.join("./", os.path.basename(data_path) + "_shuffle.npz")
else:
dest_pickle = data_path
if os.path.exists(dest_pickle):
aa = np.load(dest_pickle)
if len(aa.keys()) == 2:
image_names, image_classes, embeddings = aa["image_names"], aa["image_classes"], []
else:
# dataset with embedding values
image_names, image_classes, embeddings = aa["image_names"], aa["image_classes"], aa["embeddings"]
print(">>>> reloaded from dataset backup:", dest_pickle)
else:
if not os.path.exists(data_path):
return [], [], [], 0, None
if image_names_reg is None or image_classes_rule is None:
image_names_reg, image_classes_rule = default_image_names_reg, default_image_classes_rule
image_names = glob2.glob(os.path.join(data_path, image_names_reg))
image_names = np.random.permutation(image_names).tolist()
image_classes = [image_classes_rule(ii) for ii in image_names]
embeddings = np.array([])
np.savez_compressed(dest_pickle, image_names=image_names, image_classes=image_classes)
classes = np.max(image_classes) + 1
return image_names, image_classes, embeddings, classes, dest_pickle
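# Hedged usage sketch (illustrative, not part of the original row): build or reload the
# shuffled index for a folder dataset such as the one referenced in the comment above.
# image_names, image_classes, _, classes, dest_pickle = pre_process_folder(
#     "/datasets/faces_emore_112x112_folders")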
def tf_imread(file_path):
# tf.print('Reading file:', file_path)
img = tf.io.read_file(file_path)
img = tf.image.decode_jpeg(img, channels=3) # [0, 255]
img = tf.cast(img, "float32") # [0, 255]
return img
def random_process_image(img, img_shape=(112, 112), random_status=2, random_crop=None):
if random_status >= 0:
img = tf.image.random_flip_left_right(img)
if random_status >= 1:
# 25.5 == 255 * 0.1
img = tf.image.random_brightness(img, 25.5 * random_status)
if random_status >= 2:
img = tf.image.random_contrast(img, 1 - 0.1 * random_status, 1 + 0.1 * random_status)
img = tf.image.random_saturation(img, 1 - 0.1 * random_status, 1 + 0.1 * random_status)
if random_status >= 3 and random_crop is not None:
img = tf.image.random_crop(img, random_crop)
img = tf.image.resize(img, img_shape)
if random_status >= 1:
img = tf.clip_by_value(img, 0.0, 255.0)
return img
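# Hedged example (added for illustration, not part of the original dataset row): a minimal
# sketch of how tf_imread and random_process_image might be chained into a tf.data input
# pipeline; image_names / image_classes are assumed to come from pre_process_folder above.
def build_train_dataset(image_names, image_classes, batch_size=128, img_shape=(112, 112)):
    ds = tf.data.Dataset.from_tensor_slices((image_names, image_classes))
    # decode and augment each image, keeping the class label unchanged
    process = lambda path, label: (random_process_image(tf_imread(path), img_shape=img_shape), label)
    return ds.shuffle(10240).map(process, num_parallel_calls=tf.data.AUTOTUNE).batch(batch_size)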
def pick_by_image_per_class(image_classes, image_per_class):
cc = | pd.value_counts(image_classes) | pandas.value_counts |
# author: <NAME>
# date: 2021-12-04
'''This script generates the correlation heat map of the
transformed data
Usage: eda.py --file_path=<file_path> --out_dir=<out_dir>
Options:
--file_path=<file_path> Path to the data file
--out_dir=<out_dir> Path (directory) to save the images
'''
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from docopt import docopt
opt = docopt(__doc__)
def main(file_path, out_dir):
# making a string variable to store information on where to save the file
saving_to = out_dir + "correlation_heatmap_plot"
#reading the transformed data frame file
tranformed_df = pd.read_csv(file_path)
#shortening the target column name to DEFAULT_PAYMENT to fit better in the graph
tranformed_df = tranformed_df.rename(columns={"DEFAULT_PAYMENT_NEXT_MONTH": "DEFAULT_PAYMENT"})
#Splitting the transformed Data Frame into X_train and y_train
X_train, y_train = tranformed_df.drop(columns=["DEFAULT_PAYMENT"]), tranformed_df["DEFAULT_PAYMENT"]
#Creating the correlation plot
cor = | pd.concat((y_train, X_train), axis=1) | pandas.concat |
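# Hedged usage note (illustrative paths): the script above is invoked via docopt, e.g.
#   python eda.py --file_path=data/transformed_train.csv --out_dir=results/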
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = | tm.box_expected([True, False, False], xbox) | pandas._testing.box_expected |
import numpy as np
import pandas as pd
from glob import glob
import os
import sys
from morphomnist import io
def find_data(dirs):
''' glob the different data from the main path '''
data = [[path for path in glob(os.path.join(path_i,'*.*'))] for path_i in dirs]
return data
def merge_datasets(list_paths, data_type):
if data_type == 'gz':
# Define the name
name = list_paths[0].split('/')[-1]
# Load the data
data = [io.load_idx(path) for path in list_paths]
# Merge them
print(data[0].shape)
data_merged = np.concatenate(data)
print(data_merged.shape)
# Save them
io.save_idx(data_merged, os.path.join(OUTPUT_PATH, name))
if data_type == 'csv':
# Define the name
name = list_paths[0].split('/')[-1]
# Load the data
data = [ | pd.read_csv(path) | pandas.read_csv |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""----Definición de las librerías requeridas para la ejecución de la aplicación---"""
from flask import Flask, request, render_template #Interfaz gráfica WEB
##from flask_socketio import SocketIO
from werkzeug.utils import secure_filename #Encriptar información archivos de pdf
from email.mime.multipart import MIMEMultipart #Creación del cuerpo del correo electrónico 1
from email.mime.application import MIMEApplication #Creación del cuerpo del correo electrónico 2
from email.mime.text import MIMEText #Creación del cuerpo del correo electrónico 3
from shutil import rmtree #Gestión de directorios en el servidor
import smtplib #Conexión con el servidor de correo
from rpy2.robjects import r #Interfaz entre PYTHON y R
from rpy2.robjects import numpy2ri #Interfaz entre PYTHON y R
from time import sleep #Suspensión temporal
import pandas as pd #Gestión de archivos de texto
import os #Hereda funciones del sistema operativo para su uso en PYTHON
import base64 #Codifica contenido en base64 para su almacenamiento en una WEB
import pymssql #Interfaz de conexión con la base de datos
import Doc_latex #Gestión de documentos en LATEX en PYTHON debe tener preinstalado MIKTEX
'''---Componentes y lbrería de elaboración propia---'''
import Diseno_inicial #Calculo preliminar de la hornilla
import Costos_funcionamiento #Calculo del costo financiero de la hornilla
import Pailas #Calculo de las dimensiones de las pailas
import Gases #Calculo de las propiedades de los gases
import Areas #Calcular Areas
import threading
import numpy as np
#Create the web interface
app = Flask(__name__)
#Create a temporary directory for storing files
uploads_dir = os.path.join(app.instance_path, 'uploads')
try:
os.makedirs(uploads_dir, exist_ok=True)
except OSError:
print('Directorio existente')
def Espera():
global Lista_clientes
global Estado_Cliente
i=0
while True:
sleep(280)
j=len(Lista_clientes)
print(j)
while j>0 and Estado_Cliente==False:
#Limpiar directorios de uso temporal
try:
rmtree('static/Temp')
os.mkdir('static/Temp')
except:
os.mkdir('static/Temp')
try:
rmtree('static/pdf01')
rmtree('static/pdf02')
os.mkdir('static/pdf01')
os.mkdir('static/pdf02')
except:
os.mkdir('static/pdf01')
os.mkdir('static/pdf02')
try:
generar_valores_informe(Lista_clientes[0][0])
sleep(10)
Lista_clientes.pop(0)
sleep(10)
i=i+1
except:
print("Error")
j=0
j=0
i=0
'''--- Routing functions for the web interface ---'''
#Clear cached data when the page is refreshed.
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate, public, max-age=0"
response.headers["Expires"] = '0'
response.headers["Pragma"] = "no-cache"
return response
#Root route (main page)
@app.route('/')
def index():
try:
rmtree('static/pdf2')
except OSError:
print('Directorio eliminado')
return render_template('principal.html')
#Form for entering user data
@app.route('/usuario')
def usua():
global df
global paises
global Deptos_cana
global Ciudad_cana
global Tipo_cana
global Grados_Bx
global Nivel_pH
global Nivel_azucar
global Nivel_Sacarosa
global Nivel_pureza
global Nivel_Fosforo
global Nivel_Calidad
global Nivel_brpane
global Cana_ha
global Periodo
df = pd.read_json("static/Catalogos/Colombia.json")
paises = pd.read_excel("static/Catalogos/Paises.xlsx", engine='openpyxl')
cana = pd.read_excel("static/Catalogos/Variedades.xlsx", engine='openpyxl')
Deptos_cana = cana['Depto'].values
Ciudad_cana = cana['Ciudad'].values
Tipo_cana = cana['Tipo'].values
Grados_Bx = cana['Br'].values
Nivel_pH = cana['pH'].values
Nivel_azucar = cana['Azucares'].values
Nivel_Sacarosa = cana['Sacarosa'].values
Nivel_pureza = cana['Pureza'].values
Nivel_Fosforo = cana['Forforo'].values
Nivel_Calidad = cana['Calidad'].values
Nivel_brpane = cana['BrPanela'].values
Cana_ha = cana['ProduccionCana'].values
Periodo = cana['Periodo'].values
Variedad_cana = []
for i in range(0, len(Deptos_cana)):
if(i==0):
Variedad_cana.append(Tipo_cana[i]+", -Valor por defecto-, rendimiento= "+str(Cana_ha[i])+ ", periodo vegetativo= "+str(Periodo[i]))
else:
Variedad_cana.append(Tipo_cana[i]+", Disponible en: "+Deptos_cana[i]+"-"+Ciudad_cana[i]+", rendimiento= "+str(Cana_ha[i])+ ", periodo vegetativo= "+str(Periodo[i]))
return render_template('usuario.html',
paises_lista=paises['Nombre'],
departamentos=df.departamento,
provincia=df.ciudades,
Ciudad_cana_1=Ciudad_cana,
Variedad_cana_1=Variedad_cana,
)
#Encode the PDFs as plain text (base64)
def Crear_archivo_base_64(ruta):
with open(ruta, 'rb') as Archivo_codificado_1:
Archivo_binario_1 = Archivo_codificado_1.read()
Archivo_binario_64_1 = base64.b64encode(Archivo_binario_1)
Mensaje_base_64_1 = Archivo_binario_64_1.decode('utf-8')
return Mensaje_base_64_1
#Decode the base64 plain text back into PDFs
def Leer_pdf_base64(Nombre_pdf, Texto_base64):
PDF_Base64 = Texto_base64.encode('utf-8')
with open(Nombre_pdf, 'wb') as Archivo_Normal:
Archivo_deco = base64.decodebytes(PDF_Base64)
Archivo_Normal.write(Archivo_deco)
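# Hedged usage sketch (output path is illustrative): round-trip a report PDF through
# base64 with the two helpers above.
# texto_b64 = Crear_archivo_base_64('static/Informe.pdf')
# Leer_pdf_base64('static/Temp/Informe_copia.pdf', texto_b64)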
def Enviar_msn(Correo):
try:
# Build the message object
mensaje = MIMEMultipart()
mensaje['From'] = '<EMAIL>' #Test account used to send mail from the page
mensaje['To'] = Correo #E-mail address of the staff member in charge
mensaje['Subject'] = 'Informe generado con HornillAPP' #Subject line of the report e-mail
#Message body
msn = ('Cordial saludo.\n En este correo encontrara el resultado generado con HornillAPP a través de un informe.\n Atentamente: Equipo técnico de AGROSAVIA.')
mensaje.attach(MIMEText(msn, 'plain'))
# Attach the file generated for the user
# Structure for attaching a file using Flask/HTML from the directory root
archivo_adjunto = MIMEApplication(open('static/Informe.pdf',"rb").read())
archivo_adjunto.add_header('Content-Disposition', 'attachment', filename='Informe.pdf')
mensaje.attach(archivo_adjunto)
# User account credentials
usuario ='<EMAIL>'
contrasena='<PASSWORD>@'
#Connection interface to the mail (SMTP) server
servidor = smtplib.SMTP('correo.agrosavia.co:587')
servidor.starttls()
servidor.login(usuario, contrasena)
servidor.sendmail(mensaje['From'], mensaje['To'], mensaje.as_string())
servidor.quit()
except:
print('No se pudo enviar el informe')
def Diseño_Hornilla(Nombre_Rot, Ite):
global Diccionario
global Diccionario_2
conta=0
while(Ite==0 and conta<3):
"""------------>>>>>>>>>>HORNILLA<<<<<<<<<<<<<<<<----------------"""
"""Calculo de la hornilla (Diseño inicial)"""
Diccionario = Diseno_inicial.datos_entrada(Diccionario,0,0)
Diccionario_2 = Diseno_inicial.Calculo_por_etapas(Diccionario)
Gases.diccionarios_sis(Diccionario,Diccionario_2)
Calor_0=Diccionario_2['Calor Nece Calc por Etapa [kW]']
Vo=np.ones(int(Diccionario_2['Etapas']))
Gases.Propiedades(Calor_0,Vo,Vo,Vo)
"""Calcular volumenes iniciales"""
Dimensi_Pail = Pailas.Mostrar_pailas(Diccionario_2['Volumen de jugo [m^3/kg]'],
#Diccionario_2['Volumen de jugo [L]'],
int(Diccionario_2['Etapas']),
Nombre_Rot,
Diccionario['Tipo de cámara de combustión'],
Diccionario['Capacidad estimada de la hornilla'],
altura_media,
Diccionario)
"""Optimizar valores"""
L_temp = Areas.Areas_lisas(Dimensi_Pail)
Gases.Propiedades(Calor_0,L_temp[0],L_temp[1],L_temp[2])
Gases.Optimizacion(Diccionario, Diccionario_2, L_temp)
if(float(Diccionario['Bagazo suministrado'])<float(Diccionario['Bagazo seco'])):
Ite=1
conta=conta+1
#Function that builds the dictionaries from the application's calculations
def generar_valores_informe(Cliente_actual):
#----------->>>>>>>>>>> Global variables <<<<<<<<<<<<<<<---------
global df
global altura_media
global NivelFre
global Formulario_1_Etiquetas
global Formulario_1_Valores
global Formulario_2_Etiquetas
global Formulario_2_Valores
global Formulario_2a_Etiquetas
global Formulario_2a_Valores
global Directorio
global Deptos_cana
global Ciudad_cana
global Tipo_cana
global Grados_Bx
global Nivel_pH
global Nivel_azucar
global Nivel_Sacarosa
global Nivel_pureza
global Nivel_Fosforo
global Nivel_Calidad
global Nivel_brpane
global Cana_ha
global Diccionario
global Diccionario_2
global Diccionario_3
global Diccionario_4
result=Cliente_actual
"""Creación de la primer parte del diccionario (leer del formulario de usuario)"""
Pais_sel=result.get('Pais')
if(Pais_sel=='Colombia'):
a=result.to_dict()
Dept=result.get('Departamento')
D_aux=df.departamento
D_aux=D_aux.tolist()
amsnm=df.altura
amsnm=amsnm.tolist()
H2O=df.aguasubterranea
H2O=H2O.tolist()
altura_media=float(result.get('Altura'))#amsnm[D_aux.index(Dept)]
#print(altura_media)
#NivelFre='Minimo 4 metros'#H2O[D_aux.index(Dept)]
Nombre_Rot="Hornilla: "+a['Nombre de usuario']+" ("+a['Departamento']+'-'+a['Ciudad']+")"
else:
a=result.to_dict()
altura_media=200
#NivelFre='Minimo 4 metros'
a['Departamento']='--'
a['Ciudad']='--'
Nombre_Rot="Hornilla: "+a['Nombre de usuario']+" ("+a['Pais']+")"
#---------------->>>>>>>>> Vegetative period calculation <<<<<<<<<<<<<<<<<<<
Formulario_1_Etiquetas=[]
Formulario_1_Valores=[]
aux_i=' '
for i in a:
if(i!='x' and i!='y' and i!='Panela producida por hora [kg/hora]'
and i!='Variedades de caña sembrada' and i!='Conoce el rendimiento de la caña'
and i!='Conece las variedades de caña'
):
try:
if(i!='Telefono'):
aux_i=str(round(float(a[i]),3))
else:
aux_i=a[i]
except:
aux_i=a[i]
Formulario_1_Etiquetas.append(i)
Formulario_1_Valores.append(aux_i)
Formulario_1_Etiquetas.append('Altura media sobre el nivel del mar')
Formulario_1_Valores.append(str(altura_media)+' m')
Formulario_1_Etiquetas.append('Nivel freático requerido')
Formulario_1_Valores.append('Mínimo 4 metros')
"""Creación de la segunda parte del diccionario"""
a=result.to_dict()
cantidadcanas=int(a['Variedades de caña sembrada'])+1
Formulario_2_Etiquetas=[]
Formulario_2_Valores=[]
Formulario_2a_Etiquetas=[]
Formulario_2a_Valores=[]
Directorio =[]
G_brix_cana=0.0
G_brix_panela=0.0
ha_cana_conta=0.0
Periodo_v=0
for contacana in range(1,cantidadcanas):
try:
Valor_cana_buscar='Variedad de Caña '+str(contacana)
index=int(a[Valor_cana_buscar])-1
Formulario_2_Etiquetas.append(Valor_cana_buscar)
Formulario_2_Valores.append(Tipo_cana[index])
Formulario_2_Etiquetas.append('Grados Brix de la caña '+str(contacana))
Formulario_2_Valores.append(Grados_Bx[index])
Formulario_2_Etiquetas.append('pH')
Formulario_2_Valores.append(Nivel_pH[index])
Formulario_2_Etiquetas.append('Azúcares reductores (%)')
Formulario_2_Valores.append(Nivel_azucar[index])
Formulario_2_Etiquetas.append('Sacarosa (%)')
Formulario_2_Valores.append(Nivel_Sacarosa[index])
Formulario_2_Etiquetas.append('Pureza (%)')
Formulario_2_Valores.append(Nivel_pureza[index])
Formulario_2_Etiquetas.append('Fósforo (ppm)')
Formulario_2_Valores.append(Nivel_Fosforo[index])
#Formulario_2_Etiquetas.append('Grados Brix de la panela '+str(contacana))
#Formulario_2_Valores.append(Nivel_brpane[index])
Formulario_2_Etiquetas.append('>---------------------------------<')
Formulario_2_Valores.append('>---------------------------------<')
G_brix_cana=G_brix_cana+float(Grados_Bx[index])
G_brix_panela=G_brix_panela+float(Nivel_brpane[index])
Directorio.append('Cana/'+Tipo_cana[index]+'.png')
ha_cana_conta=ha_cana_conta+float(Cana_ha[index])
Periodo_v=float(Periodo[index])+Periodo_v
except:
print("Variedad no disponible")
ha_cana_conta_p=ha_cana_conta/(cantidadcanas-1)
Periodo_v=Periodo_v/(cantidadcanas-1)
#FORM 2
#Export the selected sugarcane varieties
datos_temp=[Formulario_2_Etiquetas,Formulario_2_Valores]
df1 = | pd.DataFrame(datos_temp) | pandas.DataFrame |
from scrapers import scraper_modules as sm
from bs4 import BeautifulSoup
import pandas as pd
wta_link = 'https://www.wta.org/go-outside/hikes?b_start:int='
def get_list_of_peak_info(html: str):
html_soup = BeautifulSoup(html, 'html.parser')
a_tags = html_soup.find_all('a', attrs={'class': 'listitem-title'})
return a_tags
def create_list_of_links():
list_of_links = []
i = 0
while i < 3721:
list_of_links.append(wta_link + str(i))
i += 30
return list_of_links
def create_csv():
links = create_list_of_links()
a_tags = []
for link in links:
html = sm.download_html(link)
a_tags = a_tags + get_list_of_peak_info(html)
print(link)
peaks_dict = sm.create_peaks_dict(a_tags, 'span', title_retrieval_method='find')
df = | pd.DataFrame(peaks_dict) | pandas.DataFrame |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
from scipy.stats import norm
from sklearn import mixture
from rnaseq_lib.math.dists import name_from_dist, DISTRIBUTIONS
# Outlier
def iqr_bounds(ys):
"""
Return upper and lower bound for an array of values
Lower bound: Q1 - (IQR * 1.5)
Upper bound: Q3 + (IQR * 1.5)
:param list ys: List of values to calculate IQR
:return: Upper and lower bound
:rtype: tuple(float, float)
"""
quartile_1, quartile_3 = np.percentile(ys, [25, 75])
iqr = quartile_3 - quartile_1
lower_bound = quartile_1 - (iqr * 1.5)
upper_bound = quartile_3 + (iqr * 1.5)
return upper_bound, lower_bound
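# Hedged example (not in the original module): using iqr_bounds to drop values that fall
# outside the Tukey fence it returns.
def drop_iqr_outliers(ys):
    upper_bound, lower_bound = iqr_bounds(ys)
    return [y for y in ys if lower_bound <= y <= upper_bound]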
# Normalization
def min_max_normalize(df):
return (df - df.min()) / (df.max() - df.min())
def mean_normalize(df):
return (df - df.mean()) / df.std()
def softmax(df):
"""
Normalizes columns to sum to 1
:param pd.DataFrame df: Dataframe to normalize
:return: Normalized DataFrame
:rtype: pd.DataFrame
"""
return df.divide(df.sum())
def l2norm(x, pad=0.001):
"""
Log2 normalization function
:param float x: Input value
:param int|float pad: Pad value (to handle zeros)
:return: log2(x+1) normalized value
:rtype: float
"""
return np.log2(x + pad)
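# Hedged demo (illustrative values, not part of the original file): softmax-normalized
# columns sum to 1, while min-max normalization rescales each column to [0, 1].
def _demo_normalizers():
    df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 20.0, 30.0]})
    assert np.allclose(softmax(df).sum(), 1.0)
    return min_max_normalize(df), mean_normalize(df), softmax(df)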
# Distributions
def run_ks(source_dist, dists=DISTRIBUTIONS):
"""
Runs Kolmogorov-Smirnov test for the provided source distribution against provided scipy distribution funcs
:param np.array source_dist: Distribution to test
:param list(func) dists: List of scipy.stats distributions to test. Defaults to list containing most.
:return: Dataframe of KS-test results
:rtype: pd.DataFrame
"""
rows = []
for dist in dists:
kstat, pval = stats.kstest(source_dist, name_from_dist(dist), args=dist.fit(source_dist))
rows.append((name_from_dist(dist), kstat, pval))
return | pd.DataFrame(rows, columns=['Name', 'KS-stat', 'Pvalue']) | pandas.DataFrame |
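# Hedged usage sketch (assumes the column names produced above): rank the candidate
# distributions by their KS statistic and return the best-fitting one.
def best_fit_distribution(source_dist, dists=DISTRIBUTIONS):
    results = run_ks(source_dist, dists=dists)
    return results.sort_values("KS-stat").iloc[0]["Name"]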
import os
import random
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
import torch
from sklearn.metrics import pairwise_distances
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset
import matplotlib.pyplot as plt
from scripts.ssc.evaluation.mldl_copied import CompPerformMetrics
from src.datasets.datasets import SwissRoll, SwissRoll_manifold
from src.evaluation.eval import Multi_Evaluation
from src.models.COREL.eval_engine import get_latentspace_representation
from src.models.WitnessComplexAE.wc_ae import WitnessComplexAutoencoder
from src.models.autoencoder.autoencoders import Autoencoder_MLP_topoae
def update_dict(dict, ks, metric, result):
for i, k in enumerate(ks):
dict.update({metric+'_k{}'.format(k): result[metric][i]})
return dict
def plot_dist_comparison(Z_manifold, Z_latent, labels, path_to_save = None,name = None):
print('normalize x,y')
Z_manifold[:, 0] = (Z_manifold[:,0]-Z_manifold[:,0].min())/(Z_manifold[:,0].max()-Z_manifold[:,0].min())
Z_manifold[:, 1] = (Z_manifold[:,1]-Z_manifold[:,1].min())/(Z_manifold[:,1].max()-Z_manifold[:,1].min())
Z_latent[:, 0] = (Z_latent[:,0]-Z_latent[:,0].min())/(Z_latent[:,0].max()-Z_latent[:,0].min())
Z_latent[:, 1] = (Z_latent[:,1]-Z_latent[:,1].min())/(Z_latent[:,1].max()-Z_latent[:,1].min())
manifold = pd.DataFrame({'x': Z_manifold[:, 0], 'y': Z_manifold[:, 1],'label': labels})
latents = pd.DataFrame({'x': Z_latent[:, 0], 'y': Z_latent[:, 1],'label': labels})
print('compute distances')
pwd_Z = pairwise_distances(Z_latent, Z_latent, n_jobs=2)
pwd_Ztrue = pairwise_distances(Z_manifold, Z_manifold, n_jobs=2)
print('normalize distances')
#normalize distances
pwd_Ztrue = (pwd_Ztrue-pwd_Ztrue.min())/(pwd_Ztrue.max()-pwd_Ztrue.min())
pwd_Z = (pwd_Z-pwd_Z.min())/(pwd_Z.max()-pwd_Z.min())
print('flatten')
#flatten
pwd_Ztrue = pwd_Ztrue.flatten()
pwd_Z = pwd_Z.flatten()
ind = random.sample(range(len(pwd_Z)), 2**12)
distances = pd.DataFrame({'Distances on $\mathcal{M}$': pwd_Ztrue[ind], 'Distances in $\mathcal{Z}$': pwd_Z[ind]})
print('plot')
#plot
fig, ax = plt.subplots(1,3, figsize=(3*10, 10))
sns.scatterplot(x = 'Distances on $\mathcal{M}$', y = 'Distances in $\mathcal{Z}$',data = distances, ax = ax[1], edgecolor = None,alpha=0.3)
#ax[0].set(xlabel='Distances on $\mathcal{M}$', ylabel='Distances in $\mathcal{Z}$',fontsize=25)
ax[1].xaxis.label.set_size(20)
ax[1].yaxis.label.set_size(20)
ax[1].set_title('Comparison of pairwise distances',fontsize=24,pad=20)
sns.scatterplot(y = 'x', x = 'y', hue='label', data = manifold,ax = ax[0],palette=plt.cm.viridis, marker=".", s=80,
edgecolor="none", legend=False)
ax[0].set_title('True manifold ($\mathcal{M}$)',fontsize=24,pad=20)
ax[0].set(xlabel="", ylabel="")
ax[0].set_yticks([])
sns.scatterplot(x = 'x', y = 'y',hue='label', data = latents,ax = ax[2],palette=plt.cm.viridis, marker=".", s=80,
edgecolor="none", legend=False)
ax[2].set_title('Latent space ($\mathcal{Z}$)',fontsize=24,pad=20)
ax[2].set(xlabel="", ylabel="")
ax[2].set_yticks([])
fig.tight_layout(pad=5)
if path_to_save != None and name != None:
print('save plot')
fig.savefig(os.path.join(path_to_save,'{}_4.pdf'.format(name)),dpi = 100)
plt.show()
plt.close()
return (np.square(pwd_Ztrue - pwd_Z)).mean()
def plot_dist_comparison2(Z_manifold, Z_latent, labels, path_to_save = None,name = None):
print('normalize x,y')
Z_manifold[:, 0] = (Z_manifold[:,0]-Z_manifold[:,0].min())/(Z_manifold[:,0].max()-Z_manifold[:,0].min())
Z_manifold[:, 1] = (Z_manifold[:,1]-Z_manifold[:,1].min())/(Z_manifold[:,1].max()-Z_manifold[:,1].min())
Z_latent[:, 0] = (Z_latent[:,0]-Z_latent[:,0].min())/(Z_latent[:,0].max()-Z_latent[:,0].min())
Z_latent[:, 1] = (Z_latent[:,1]-Z_latent[:,1].min())/(Z_latent[:,1].max()-Z_latent[:,1].min())
manifold = | pd.DataFrame({'x': Z_manifold[:, 0], 'y': Z_manifold[:, 1],'label': labels}) | pandas.DataFrame |
# Lint as: python3
"""Tests for main_heatmap."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import main_heatmap
import numpy as np
import pandas as pd
SAMPLE_LOGS_LINK = 'https://console.cloud.google.com/logs?project=xl-ml-test&advancedFilter=resource.type%3Dk8s_container%0Aresource.labels.project_id%3Dxl-ml-test%0Aresource.labels.location=us-central1-b%0Aresource.labels.cluster_name=xl-ml-test%0Aresource.labels.namespace_name=automated%0Aresource.labels.pod_name:pt-1.5-cpp-ops-func-v2-8-1587398400&dateRangeUnbound=backwardInTime'
def _get_values_for_failures(values, statuses):
return [zipped[0] for zipped in zip(
values, statuses) if zipped[1] == 'failure']
class MainHeatmapTest(parameterized.TestCase):
@parameterized.named_parameters(
('all_success_all_oob', {
'job_statuses': ['success', 'success', 'success'],
'metric_statuses': ['failure', 'failure', 'failure'],
'expected_overall_statuses': ['failure', 'failure', 'failure'],
'expected_job_status_abbrevs': ['M', 'M', 'M']}),
('all_success_some_oob', {
'job_statuses': ['success', 'success', 'success'],
'metric_statuses': ['failure', 'failure', 'success'],
'expected_overall_statuses': ['failure', 'failure', 'success'],
'expected_job_status_abbrevs': ['M', 'M', '']}),
('all_success_none_oob', {
'job_statuses': ['success', 'success', 'success'],
'metric_statuses': ['success', 'success', 'success'],
'expected_overall_statuses': ['success', 'success', 'success'],
'expected_job_status_abbrevs': ['', '', '']}),
('some_success_some_oob', {
'job_statuses': ['success', 'failure', 'success'],
'metric_statuses': ['success', 'success', 'failure'],
'expected_overall_statuses': ['success', 'failure', 'failure'],
'expected_job_status_abbrevs': ['', 'F', 'M']}),
)
def test_process_dataframes(self, args_dict):
job_statuses = args_dict['job_statuses']
metric_statuses = args_dict['metric_statuses']
assert len(job_statuses) == len(metric_statuses)
job_status_df = pd.DataFrame({
'test_name': pd.Series(['test{}'.format(n) for n in range(
len(job_statuses))]),
'run_date': pd.Series(['2020-04-{:02d}'.format(n) for n in range(
len(job_statuses))]),
'job_status': pd.Series(job_statuses),
'logs_link': pd.Series([SAMPLE_LOGS_LINK for _ in job_statuses]),
'logs_download_command': pd.Series(
['my command'] + ['' for _ in job_statuses[1:]]),
})
# The SQL query in the real code only returns rows where metrics were
# out of bounds. These oobs rows correspond to 'failure' for
# metric_statuses in this test.
metric_names = ['acc' if n % 2 else 'loss' for n in range(
len(job_status_df))]
metric_values = [98.0 if n % 2 else 0.6 for n in range(
len(job_status_df))]
metric_upper_bounds = [np.nan if n % 2 else 0.5 for n in range(
len(job_status_df))]
metric_lower_bounds = [99.0 if n % 2 else np.nan for n in range(
len(job_status_df))]
metric_status_df = pd.DataFrame({
'test_name': pd.Series(_get_values_for_failures(
job_status_df['test_name'].tolist(), metric_statuses)),
'run_date': pd.Series(_get_values_for_failures(
job_status_df['run_date'].tolist(), metric_statuses)),
'metric_name': pd.Series(_get_values_for_failures(
metric_names, metric_statuses)),
'metric_value': pd.Series(_get_values_for_failures(
metric_values, metric_statuses)),
'metric_upper_bound': pd.Series(_get_values_for_failures(
metric_upper_bounds, metric_statuses)),
'metric_lower_bound': pd.Series(_get_values_for_failures(
metric_lower_bounds, metric_statuses)),
})
# Process the dataframes and make sure the overall_status matches
# the expected overall_status.
df = main_heatmap.process_dataframes(job_status_df, metric_status_df)
self.assertEqual(df['overall_status'].tolist(),
args_dict['expected_overall_statuses'])
self.assertEqual(df['job_status_abbrev'].tolist(),
args_dict['expected_job_status_abbrevs'])
# We only want to display metrics as a top-level failure if the job
# succeeded. For failed jobs, it's not so helpful to know that the
# metrics were out of bounds.
metrics_failure_explanations = df['failed_metrics'].tolist()
for i, expl_list in enumerate(metrics_failure_explanations):
job_status = job_statuses[i]
metric_status = metric_statuses[i]
if job_status == 'success' and metric_status == 'failure':
self.assertGreaterEqual(len(expl_list), 1)
for expl in expl_list:
self.assertTrue('outside' in expl)
else:
self.assertFalse(expl_list)
commands = df['logs_download_command'].tolist()
# If the command is already populated, it should be left alone.
self.assertEqual(commands[0], 'my command')
def test_process_dataframes_no_job_status(self):
job_status_df = pd.DataFrame({
'test_name': pd.Series(['a', 'b']),
'run_date': pd.Series(['2020-04-10', '2020-04-11']),
'logs_link': pd.Series(['c', 'd']),
'logs_download_command': pd.Series(['e', 'f']),
})
df = main_heatmap.process_dataframes(job_status_df, pd.DataFrame())
self.assertTrue(df.empty)
df = main_heatmap.process_dataframes(pd.DataFrame(), pd.DataFrame())
self.assertTrue(df.empty)
def test_make_plot(self):
input_df = pd.DataFrame({
'test_name': pd.Series(['test1', 'test2', 'test3']),
'run_date': | pd.Series(['2020-04-21', '2020-04-20', '2020-04-19']) | pandas.Series |
import numpy as np
import pandas as pd
import datetime as dt
import pickle
import bz2
from .analyzer import summarize_returns
DATA_PATH = '../backtest/'
class Portfolio():
"""
Portfolio is the core class for event-driven backtesting. It conducts the
backtesting in the following order:
1. Initialization:
Set the capital base we invest and the securities we
want to trade.
2. Receive the price information with .receive_price():
Insert the new price information of each securities so that the
Portfolio class will calculated and updated the relevant status such
as the portfolio value and position weights.
3. Rebalance with .rebalance():
Depending on the signal, we can choose to change the position
on each securities.
4. Keep position with .keep_position():
If we don't rebalance the portfolio, we need to tell it to keep
current position at the end of the market.
Example
-------
see Vol_MA.ipynb, Vol_MA_test_robustness.ipynb
Parameters
----------
capital: numeric
capital base we put into the porfolio
inception: datetime.datetime
the time when we start backtesting
components: list of str
tikers of securities to trade, such as ['AAPL', 'MSFT', 'AMZN]
name: str
name of the portfolio
is_share_integer: boolean
If true, the shares of securities will be rounded to integers.
"""
def __init__(self, capital, inception, components,
name='portfolio', is_share_integer=False):
# -----------------------------------------------
# initialize parameters
# -----------------------------------------------
self.capital = capital # initial money invested
if isinstance(components, str):
components = [components] # should be list
self.components = components # equities in the portfolio
# self.commission_rate = commission_rate
self.inception = inception
self.component_prices = pd.DataFrame(columns=self.components)
self.name = name
self.is_share_integer = is_share_integer
# self.benchmark = benchmark
# -----------------------------------------------
# record portfolio status to series and dataFrames
# -----------------------------------------------
# temoprary values
self._nav = pd.Series(capital,index=[inception])
self._cash = pd.Series(capital,index=[inception])
self._security = pd.Series(0,index=[inception])
self._component_prices = pd.DataFrame(columns=self.components) # empty
self._shares = pd.DataFrame(0, index=[inception], columns=self.components)
self._positions = pd.DataFrame(0, index=[inception], columns=self.components)
self._weights = pd.DataFrame(0, index=[inception], columns=self.components)
self._share_changes = pd.DataFrame(columns=self.components) # empty
self._now = self.inception
self._max_nav = pd.Series(capital,index=[inception])
self._drawdown = pd.Series(0, index=[inception])
self._relative_drawdown = pd.Series(0, index=[inception])
# series
self.nav_open = pd.Series()
self.nav_close = | pd.Series() | pandas.Series |
import datetime
import pandas as pd
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
def plot_team(team):
years = [2012,2013,2014,2015,2016,2017]
g = pd.read_csv("audl_elo.csv")
dates = pd.to_datetime(g[(g["team_id"] == team)]["date"])
elo = g[(g["team_id"] == team)]["elo_n"]
plt.plot(dates,elo)
plt.show()
def plot_team_b(team):
years = [2012,2013,2014,2015,2016,2017]
g = pd.read_csv("audl_elo.csv")
fig, axs = plt.subplots(1,len(years),sharey=True)
for i in range(len(axs)):
#Plotting
dates = pd.to_datetime(g[(g["team_id"] == team) & (g["year_id"] == years[i])]["date"])
elo = g[(g["team_id"] == team) & (g["year_id"] == years[i])]["elo_n"]
axs[i].plot(dates,elo)
#Formatting
axs[i].xaxis.set_ticks_position('none')
axs[i].set_xlabel(str(years[i]))
axs[i].tick_params('x',labelbottom=False)
axs[i].set_ylim(1050,1950)
if i == 0:
axs[i].yaxis.tick_left()
axs[i].set_yticks(range(1100,2000,100))
if i != len(axs)-1:
axs[i].spines['right'].set_visible(False)
if i != 0:
axs[i].yaxis.set_ticks_position('none')
axs[i].spines['left'].set_visible(False)
plt.show()
def plot_teams(teams):
years = [2012,2013,2014,2015,2016,2017]
g = pd.read_csv("audl_elo.csv")
#plt.style.use('fivethirtyeight')
fig, axs = plt.subplots(1,len(years),sharey=True)
for i in range(len(axs)):
season_start = pd.to_datetime(g[(g["year_id"] == years[i])]["date"]).min() - datetime.timedelta(7)
season_end= pd.to_datetime(g[(g["year_id"] == years[i])]["date"]).max()
#Plotting
colors = ['b','g','r','c','m','y','k']
for j,team in enumerate(teams):
dates = pd.to_datetime(g[(g["team_id"] == team) & (g["year_id"] == years[i])]["date"])
if dates.shape[0] > 0:
dates = pd.Series(season_start).append(dates)
elo = g[(g["team_id"] == team) & (g["year_id"] == years[i])]["elo_n"]
if elo.shape[0] > 0:
start_elo = g[(g["team_id"] == team) & (g["year_id"] == years[i])]["elo_i"].iloc[0]
elo = | pd.Series(start_elo) | pandas.Series |
"""
Utility functions for gene annotation
"""
import logging
import re
import urllib
from io import StringIO
import pandas as pd
def cog2str(cog):
"""
Get the full description for a COG category letter
Parameters
----------
cog : str
COG category letter
Returns
-------
str
Description of COG category
"""
cog_dict = {
"A": "RNA processing and modification",
"B": "Chromatin structure and dynamics",
"C": "Energy production and conversion",
"D": "Cell cycle control, cell division, chromosome partitioning",
"E": "Amino acid transport and metabolism",
"F": "Nucleotide transport and metabolism",
"G": "Carbohydrate transport and metabolism",
"H": "Coenzyme transport and metabolism",
"I": "Lipid transport and metabolism",
"J": "Translation, ribosomal structure and biogenesis",
"K": "Transcription",
"L": "Replication, recombination and repair",
"M": "Cell wall/membrane/envelope biogenesis",
"N": "Cell motility",
"O": "Post-translational modification, protein turnover, and chaperones",
"P": "Inorganic ion transport and metabolism",
"Q": "Secondary metabolites biosynthesis, transport, and catabolism",
"R": "General function prediction only",
"S": "Function unknown",
"T": "Signal transduction mechanisms",
"U": "Intracellular trafficking, secretion, and vesicular transport",
"V": "Defense mechanisms",
"W": "Extracellular structures",
"X": "No COG annotation",
"Y": "Nuclear structure",
"Z": "Cytoskeleton",
}
return cog_dict[cog]
def _get_attr(attributes, attr_id, ignore=False):
"""
Helper function for parsing GFF annotations
Parameters
----------
attributes : str
Attribute string
attr_id : str
Attribute ID
ignore : bool
If true, ignore errors if ID is not in attributes (default: False)
Returns
-------
str, optional
Value of attribute
"""
try:
return re.search(attr_id + "=(.*?)(;|$)", attributes).group(1)
except AttributeError:
if ignore:
return None
else:
raise ValueError("{} not in attributes: {}".format(attr_id, attributes))
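# Hedged example (illustrative attribute string): _get_attr extracts a single key from a
# GFF attributes field, e.g.
#   _get_attr("ID=gene0001;locus_tag=b0001", "locus_tag")  ->  "b0001"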
def gff2pandas(gff_file, feature="CDS", index=None):
"""
Converts GFF file(s) to a Pandas DataFrame
Parameters
----------
gff_file : str or list
Path(s) to GFF file
feature: str or list
Name(s) of features to keep (default = "CDS")
index : str, optional
Column or attribute to use as index
Returns
-------
df_gff: ~pandas.DataFrame
GFF formatted as a DataFrame
"""
# Argument checking
if isinstance(gff_file, str):
gff_file = [gff_file]
if isinstance(feature, str):
feature = [feature]
result = []
for gff in gff_file:
with open(gff, "r") as f:
lines = f.readlines()
# Get lines to skip
skiprow = sum([line.startswith("#") for line in lines])
# Read GFF
names = [
"accession",
"source",
"feature",
"start",
"end",
"score",
"strand",
"phase",
"attributes",
]
DF_gff = | pd.read_csv(gff, sep="\t", skiprows=skiprow, names=names, header=None) | pandas.read_csv |
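# Hedged usage sketch (file name is illustrative; the index column is an assumption):
# df_genes = gff2pandas("genome.gff", feature="CDS", index="locus_tag")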
import dash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import pandas as pd
import plotly.express as px
import plotly.graph_objs as go
from datetime import date
import dash_loading_spinners as dls
from dash.dependencies import Input, Output, ClientsideFunction, State
from app import app
import requests
features = ["Screw Speed", "Gas Flow Rate", "Steam Pressure", "Oven-Home Temperature",
"Water Temperature", "Oxygen_pct", "Oven-Home Pressure", "Combustion Air Pressure",
"Temperature before prear", "Temperature after prear", "Burner Position", "Burner_pct",
"Borra Flow Rate_kgh", "Cisco Flow Rate_kgh"]
cardtab_1 = dbc.Card([
html.Div(
id='output-container-date-picker-range',
className="month-container"
),
dls.Hash(
dcc.Graph(id="graph-steam", className = "graph-card"),
size = 160,
speed_multiplier = 0.8,
debounce = 200
)
])
cardtab_2 = dbc.Card([
html.Div(
id='output-container-date-picker-range',
className="month-container"
),
dls.Hash(
dcc.Graph(id="graph-distribution", className = "graph-card"),
size = 160,
speed_multiplier = 0.8,
debounce = 200
)
])
card_3 = dbc.Card(
[
dbc.Col([
dbc.Col([
html.P(
"Select date range that you want to see:"
),
dcc.DatePickerRange(
id='my-date-picker-range',
min_date_allowed=date(2020, 10, 1),
max_date_allowed=date(2021, 6, 30),
initial_visible_month=date(2020, 10, 1),
end_date=date(2021, 6, 30),
clearable=True,
with_portal=True,
month_format="MMMM, YYYY",
number_of_months_shown=3
)
]),
html.Hr(),
dbc.Col([
html.P(
"Select the data frequency:"
),
dbc.RadioItems(
id='frequency-radioitems',
labelStyle={"display": "inline-block"},
options= [
{"label": "Daily", "value": "data_daily"},
{"label": "Hourly", "value": "data_hourly"}
], value= "data_daily",
style= {"color": "black"}
)
])
])
])
card_4 = dbc.Card([
dbc.Col([
dbc.FormGroup([
dbc.Label("Y - Axis"),
dcc.Dropdown(
id="y-variable",
options=[{
"label": col,
"value": col
} for col in features],
value="Gas Flow Rate",
),
]),
html.H6("Efficiency Range"),
dcc.RangeSlider(
id='slider-efficiency',
min=0,
max=1.00,
step=0.01,
value=[0, 1.00]
),
html.P(id='range-efficiency')
])
])
card_5 = dbc.Card([
html.Div(
id='output-container-date-picker-range',
className="month-container"
),
dls.Hash(
dcc.Graph(id="graph-comparison", className = "graph-card"),
size = 160,
speed_multiplier = 0.8,
debounce = 200
)
])
layout= [
html.Div([
# html.Img(
# src = "/assets/images/C1_icon_1.png",
# className = "corr-icon"
# ),
html.Img(
src = "/assets/images/Buencafe-logo.png",
className = "corr-icon"
),
html.H2(
"Steam Analytics",
className = "content-title"
),
html.Div(children=[
html.Div([
# dbc.Row([
# dbc.Col(
# dbc.Tabs([
# dbc.Tab(cardtab_1, label="Time series"),
# dbc.Tab(cardtab_2, label="Distribution"),
# ],
# id="card-tabs",
# card=True,
# active_tab="tab-1",
# ),
# width=9
# ),
# dbc.Col(
# card_3, width=3
# )
# ]),
dbc.Tabs([
dbc.Tab(cardtab_1, label="Time series"),
dbc.Tab(cardtab_2, label="Distribution"),
],
id="card-tabs",
card=True,
active_tab="tab-1",
),
card_3,
], className = "graph_col_1"),
html.Div(children =[
# dbc.Row([
# dbc.Col(
# card_4, width=3
# ),
# dbc.Col(
# card_5, width=9
# )
# ]),
card_4,
card_5
], className = "data_col_2")
], className = "wrapper__steam-data")
],className = "wrapper__steam"),
]
@app.callback(
Output('graph-steam','figure'),
[Input('my-date-picker-range', 'start_date'),
Input('my-date-picker-range', 'end_date'),
Input('frequency-radioitems', 'value')]
)
def update_figure(start_date, end_date, value_radio):
# if value_radio == "data_daily":
# data = pd.read_csv("data/data_interpolate_daily.csv", parse_dates=["Time"])
# data.set_index(["Time"], inplace=True)
# elif value_radio == "data_hourly":
# data = pd.read_csv("data/data_interpolate_hourly.csv", parse_dates=["Time"])
# data.set_index(["Time"], inplace=True)
try:
if value_radio == "data_daily":
query = "SELECT * FROM daily"
payload = {
"query": query
}
petition = requests.post('https://k8nmzco6tb.execute-api.us-east-1.amazonaws.com/dev/data',payload)
test_var = petition.json()['body']
data = pd.DataFrame(test_var)
data['Time'] = | pd.to_datetime(data['Time']) | pandas.to_datetime |
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
| tm.assert_index_equal(res, exp) | pandas.util.testing.assert_index_equal |
# tests.test_regressor.test_residuals
# Ensure that the regressor residuals visualizations work.
#
# Author: <NAME> <<EMAIL>>
# Created: Sat Oct 8 16:30:39 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: test_residuals.py [7d3f5e6] <EMAIL> $
"""
Ensure that the regressor residuals visualizations work.
"""
##########################################################################
## Imports
##########################################################################
import pytest
import matplotlib.pyplot as plt
from yellowbrick.regressor.residuals import *
from tests.base import VisualTestCase
from tests.dataset import DatasetMixin, Dataset, Split
from sklearn.svm import SVR
from sklearn.linear_model import Ridge, Lasso
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split as tts
try:
import pandas as pd
except ImportError:
pd = None
##########################################################################
## Data
##########################################################################
@pytest.fixture(scope='class')
def data(request):
"""
Creates a fixture of train and test splits for the sklearn digits dataset
For ease of use returns a Dataset named tuple composed of two Split tuples.
"""
X, y = make_regression(
n_samples=500, n_features=22, n_informative=8, random_state=42
)
X_train, X_test, y_train, y_test = tts(
X, y, test_size=0.2, random_state=11
)
# Set a class attribute for digits
request.cls.data = Dataset(
Split(X_train, X_test), Split(y_train, y_test)
)
##########################################################################
## Prediction Error Test Cases
##########################################################################
@pytest.mark.usefixtures("data")
class TestPredictionError(VisualTestCase, DatasetMixin):
"""
Test the PredictionError visualizer
"""
def test_pred_error_integration(self):
"""
Integration test with image similarity on random data with SVR
"""
_, ax = plt.subplots()
visualizer = PredictionError(SVR(), ax=ax)
visualizer.fit(self.data.X.train, self.data.y.train)
visualizer.score(self.data.X.test, self.data.y.test)
visualizer.finalize()
self.assert_images_similar(visualizer, tol=10)
@pytest.mark.skipif(pd is None, reason="pandas is required")
def test_pred_error_integration_pandas(self):
"""
Test Pandas real world dataset with image similarity on Ridge
"""
_, ax = plt.subplots()
# Load the occupancy dataset from fixtures
data = self.load_data('energy')
target = 'cooling_load'
features = [
"relative_compactness", "surface_area", "wall_area", "roof_area",
"overall_height", "orientation", "glazing_area",
"glazing_area_distribution"
]
# Create instances and target
X = pd.DataFrame(data[features])
y = pd.Series(data[target].astype(float))
# Create train/test splits
splits = tts(X, y, test_size=0.2, random_state=8873)
X_train, X_test, y_train, y_test = splits
visualizer = PredictionError(Ridge(), ax=ax)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.finalize()
self.assert_images_similar(visualizer, tol=10)
def test_score(self):
"""
Assert returns R2 score
"""
visualizer = PredictionError(LinearRegression())
visualizer.fit(self.data.X.train, self.data.y.train)
score = visualizer.score(self.data.X.test, self.data.y.test)
assert score == pytest.approx(1.0)
assert visualizer.score_ == score
@pytest.mark.skip(reason="not implemented yet")
def test_peplot_shared_limits(self):
"""
Test shared limits on the peplot
"""
raise NotImplementedError("not yet implemented")
@pytest.mark.skip(reason="not implemented yet")
def test_peplot_draw_bounds(self):
"""
Test the peplot +/- one bounding in draw
"""
raise NotImplementedError("not yet implemented")
##########################################################################
## Residuals Plot Test Cases
##########################################################################
@pytest.mark.usefixtures("data")
class TestResidualsPlot(VisualTestCase, DatasetMixin):
"""
Test ResidualPlot visualizer
"""
def test_residuals_plot_integration(self):
"""
Integration test with image similarity on random data with OLS
"""
_, ax = plt.subplots()
visualizer = ResidualsPlot(LinearRegression(), ax=ax)
visualizer.fit(self.data.X.train, self.data.y.train)
visualizer.score(self.data.X.test, self.data.y.test)
visualizer.finalize()
self.assert_images_similar(visualizer, tol=10)
@pytest.mark.skipif(pd is None, reason="pandas is required")
def test_residuals_plot_integration_pandas(self):
"""
Test Pandas real world dataset with image similarity on Lasso
"""
_, ax = plt.subplots()
# Load the occupancy dataset from fixtures
data = self.load_data('energy')
target = 'heating_load'
features = [
"relative_compactness", "surface_area", "wall_area", "roof_area",
"overall_height", "orientation", "glazing_area",
"glazing_area_distribution"
]
# Create instances and target
X = | pd.DataFrame(data[features]) | pandas.DataFrame |
from simio_lisa.simio_tables import *
import logging
import pandas as pd
import os
import plotly.express as px
from plotly.offline import plot
import time
from abc import ABC, abstractmethod
class SimioPlotter(ABC):
def __init__(self,
output_tables,
logger_level: int = logging.INFO,
**kwargs):
"""
Parent class.
:param output_tables: DICT containing all tables
:param **x_axis/y_axis/time_axis: column to be used as x/y/time axis
:param **legend_col: column to use to distinguish colors/legend
:param **objects_dict: dictionary to distinguish the groups of entities to be compared together
"""
self._tables_names = None
self._tables = output_tables
# Instance Tables
self._x_axis = kwargs.get('x_axis', None)
self._y_axis = kwargs.get('y_axis', None)
self._time_axis = kwargs.get('time_axis', None)
self._objects_dict = kwargs.get('objects_dict', None)
self._legend_col = kwargs.get('legend_col', None)
logging.getLogger().setLevel(logger_level)
@abstractmethod
def plot(self, tables, kind):
"""
Force all subclasses to have a plot method
"""
pass
@property
def tables(self):
return self._tables
@property
def tables_names(self):
return self._tables_names
@property
def time_axis(self):
return self._time_axis
@time_axis.setter
def time_axis(self, new_value):
self._time_axis = new_value
@property
def y_axis(self):
return self._y_axis
@y_axis.setter
def y_axis(self, new_value):
self._y_axis = new_value
@property
def x_axis(self):
return self._x_axis
@x_axis.setter
def x_axis(self, new_value):
self._x_axis = new_value
@property
def objects_dict(self):
return self._objects_dict
@objects_dict.setter
def objects_dict(self, new_value):
self._objects_dict = new_value
@property
def legend_col(self):
return self._legend_col
@legend_col.setter
def legend_col(self, new_value):
self._legend_col = new_value
class SimioTimeSeries(SimioPlotter):
def __init__(self,
output_tables,
logger_level: int = logging.INFO,
**kwargs):
"""
Class child of SimioPlotter to plot time series. Necessary in kwargs: time_axis and y_axis.
When using plot_columns (plot_tables) y_axis (tables) can be a list.
"""
SimioPlotter.__init__(self,
output_tables,
logger_level,
**kwargs)
def plot_columns(self, table: str):
"""
Plot TimeSeries comparing different columns (y_Axis should be a list of columns,
only one table should be provided)
"""
input_data = self.tables[table]
time_axis = self.time_axis
y_axis = self.y_axis
input_data[time_axis] = pd.to_datetime(input_data[time_axis])
fig = px.line(input_data, x=time_axis, y=y_axis)
return fig
def plot_tables(self, tables):
"""
Plot TimeSeries comparing different tables (time_axis and y_axis should have the same name in all the tables)
:param tables:
:return:
"""
if type(tables) is str:
input_data = self.tables[tables]
input_data['source'] = tables
else:
input_data = pd.DataFrame()
for t in tables:
aux = self.tables[t]
aux['source'] = t
input_data = input_data.append(aux, ignore_index=True)
time_axis = self.time_axis
y_axis = self.y_axis
input_data[time_axis] = pd.to_datetime(input_data[time_axis])
fig = px.line(input_data, x=time_axis, y=y_axis, color='source')
return fig
def plot(self, tables, kind):
if kind == 'time_series_columns':
fig = self.plot_columns(table=tables)
plot(fig)
elif kind == 'time_series_tables':
fig = self.plot_tables(tables=tables)
plot(fig)
else:
raise ValueError(f'Kind {kind} not defined')
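# Illustrative usage sketch (editor's addition, not part of the original module).
# The table name and columns below are invented; any dict of DataFrames with a
# datetime column works as `output_tables`.
def _example_simio_time_series():
    tables = {'Throughput': pd.DataFrame({'Time': pd.date_range('2021-01-01', periods=4, freq='H'),
                                          'Count': [1, 3, 2, 5]})}
    plotter = SimioTimeSeries(tables, time_axis='Time', y_axis='Count')
    return plotter.plot_columns(table='Throughput')  # returns a plotly figure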
class SimioBarPie(SimioPlotter):
def __init__(self,
output_tables,
logger_level: int = logging.INFO,
**kwargs):
"""
Class child of SimioPlotter to plot bar plots or pie charts. Necessary in kwargs: x_axis, y_axis and
objects_dict (time_axis necessary only for method plot_bars_time_series)
:param objects_dict: dictionary grouping the objects to compare
"""
SimioPlotter.__init__(self,
output_tables,
logger_level,
**kwargs)
def plot_bars(self, tables: str):
if type(tables) is str:
input_data = self.tables[tables]
input_data['source'] = tables
else:
input_data = pd.DataFrame()
for t in tables:
aux = self.tables[t]
aux['source'] = t
input_data = input_data.append(aux, ignore_index=True)
y_axis = self.y_axis
x_axis = self.x_axis
object_groups_dict = self.objects_dict
input_data[y_axis] = input_data[y_axis].astype(float) # otherwise the column is lost when grouping
f = {}
for k in object_groups_dict.keys():
input_data_plt = input_data[input_data[x_axis].isin(object_groups_dict[k])].copy(deep=True)
input_data_plt = input_data_plt.groupby(by=x_axis, as_index=False).mean()
fig = px.bar(input_data_plt, x=x_axis, y=y_axis, barmode="group")
f[k] = fig
return f
def plot_pie(self, tables: str):
if type(tables) is str:
input_data = self.tables[tables]
input_data['source'] = tables
else:
input_data = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import os
import glob
import click
from pathlib import Path
from eye_tracking.preprocessing.functions.et_preprocess import preprocess_et
from eye_tracking.preprocessing.functions.detect_events import make_fixations, make_blinks, make_saccades
import warnings
warnings.filterwarnings("ignore")
def preprocess_eye(subj_fpath):
""" preprocess eye tracking data using code from https://github.com/teresa-canasbajo/bdd-driveratt/tree/master/eye_tracking/preprocessing
saves out preprocessed data for events, saccades, fixations
Args:
subj_fpath (str): full path to top-level directory of eye-tracking
"""
# get all sessions
ses_dirs = glob.glob(os.path.join(subj_fpath, '*ses*'))
# get subj name
subj = Path(subj_fpath).name
# loop over sessions
for ses_dir in ses_dirs:
# get sess name
sess = Path(ses_dir).name
# get all runs
run_dirs = glob.glob(os.path.join(ses_dir, '*'))
# loop over runs
for run_dir in run_dirs:
# get run name
run = Path(run_dir).name
# get preprocess dir
preprocess_dir = os.path.join(run_dir, 'preprocessed')
# check if data have already been preprocessed
if not os.path.isdir(preprocess_dir):
try:
data = preprocess_et(subject='', datapath=run_dir, surfaceMap=False, eventfunctions=(make_fixations, make_blinks, make_saccades))
# modify the msgs and save to disk
msgs_df = pd.read_csv(os.path.join(preprocess_dir, 'pl_msgs.csv'))
msgs = _modify_msgs(dataframe=msgs_df)
msgs.to_csv(os.path.join(preprocess_dir, f'{subj}_{sess}_{run}_pl_msgs.csv'))
# merge msgs to events and save to disk
events_df = pd.read_csv(os.path.join(preprocess_dir, 'pl_events.csv'))
events_msgs = _merge_msgs_events(events=events_df, msgs=msgs)
events_msgs.to_csv(os.path.join(preprocess_dir, f'{subj}_{sess}_{run}_pl_msgs_events.csv'))
# merge msgs to samples and save to disk
samples_df = pd.read_csv(os.path.join(preprocess_dir, 'pl_samples.csv'))
samples_msgs = _merge_msgs_samples(samples=samples_df, msgs=msgs)
samples_msgs.to_csv(os.path.join(preprocess_dir, f'{subj}_{sess}_{run}_pl_msgs_samples.csv'))
print('Preprocessing complete!')
except:
print('something went wrong with preprocessing ...')
else:
print('These data have already been preprocessed ...')
def concat_runs(subj_fpath):
# get all sessions
ses_dirs = glob.glob(os.path.join(subj_fpath, '*ses*'))
# get subj name
subj = Path(subj_fpath).name
df_events_all = pd.DataFrame()
df_samples_all = pd.DataFrame()
# loop over sessions
for ses_dir in np.sort(ses_dirs):
# get sess name
sess = Path(ses_dir).name
# get all runs
run_dirs = glob.glob(os.path.join(ses_dir, '*'))
# loop over runs
for run_dir in np.sort(run_dirs):
# get run name
run = Path(run_dir).name
# load preprocessed data for subj/sess
try:
df_events = pd.read_csv(os.path.join(subj_fpath, sess, run, 'preprocessed', f'{subj}_{sess}_{run}_pl_msgs_events.csv'))
df_events['subj'] = subj
df_events['sess'] = sess
df_samples = pd.read_csv(os.path.join(subj_fpath, sess, run, 'preprocessed', f'{subj}_{sess}_{run}_pl_msgs_samples.csv'))
df_samples['subj'] = subj
df_samples['sess'] = sess
# clean up
df_events = _clean_up(dataframe=df_events)
df_samples = _clean_up(dataframe=df_samples)
# concat to dataframe
df_events_all = | pd.concat([df_events_all, df_events]) | pandas.concat |
import numpy as np
import seaborn as sns
from sklearn.ensemble import RandomTreesEmbedding as rte
from sklearn.cluster.hierarchical import AgglomerativeClustering as hac
import math
import warnings
import random
import networkx as nx
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import time
import pandas as pd
import scipy.io as sio
import matplotlib.pyplot as plt
from sklearn.preprocessing import QuantileTransformer
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
import markov_clustering as mc
import community as louvain
import os
import json
from networkx.generators import community, erdos_renyi_graph
plt.style.use('seaborn') # pretty matplotlib plots
plt.rc('font', size=14)
plt.rc('figure', titlesize=18)
plt.rc('axes', labelsize=15)
plt.rc('axes', titlesize=18)
import subprocess
def ensemble_density_huge(path, sep):
cmd = ['../main', "{0}".format(path)]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
p.wait()
return(p.returncode)
def ensemble_attributes(path, sep):
cmd = ['../uet', "{0}".format(path)]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
p.wait()
return(p.returncode)
def test_clustering(n_runs=20, alpha=0.5):
nmis_both = []
nmis_attributes = []
nmis_structure = []
for i in range(n_runs):
print("Run number {0}".format(i))
ensemble_density_huge('file.csv', "'\t'")
dist_dense = pd.read_csv("./matrix.csv", delimiter="\t", header=None).values
dist_dense = dist_dense[:,:-1]
sims_attributes = ensemble_attributes("file_attributes.csv", "\t")
sim_attributes = | pd.read_csv("./matrix_uet.csv", delimiter="\t", header=None) | pandas.read_csv |
import numpy as np
import pandas as pd
# from scipy.stats import gamma
np.random.seed(181336)
number_regions = 5
number_strata = 10
number_units = 5000
units = np.linspace(0, number_units - 1, number_units, dtype="int16") + 10 * number_units
units = units.astype("str")
sample = pd.DataFrame(units)
sample.rename(columns={0: "unit_id"}, inplace=True)
sample["region_id"] = "xx"
for i in range(number_units):
sample.loc[i]["region_id"] = sample.iloc[i]["unit_id"][0:2]
sample["cluster_id"] = "xxx"
for i in range(number_units):
sample.loc[i]["cluster_id"] = sample.iloc[i]["unit_id"][0:4]
area_type = pd.DataFrame(np.unique(sample["cluster_id"]))
area_type.rename(columns={0: "cluster_id"}, inplace=True)
area_type["area_type"] = np.random.choice(("urban", "rural"), area_type.shape[0], p=(0.4, 0.6))
sample = | pd.merge(sample, area_type, on="cluster_id") | pandas.merge |
"""
Coding: UTF-8
Author: Randal
Time: 2021/2/20
E-mail: <EMAIL>
Description: This is a simple toolkit for data extraction of text.
The most important function in the script is about word frequency statistics.
Using re, I generalized the process in words counting, regardless of any preset
word segmentation. Besides, many interesting functions, like getting top sentences are built here.
All rights reserved.
"""
import xlwings as xw
import pandas as pd
import numpy as np
import os
import re
from alive_progress import alive_bar
from alive_progress import show_bars, show_spinners
import jieba
import datetime
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import math
class jieba_vectorizer(CountVectorizer):
def __init__(self, tf, userdict, stopwords, orient=False):
"""
        :param tf: input sample frame, {axis: 1, 0: id, 1: title, 2: body text, 3: source, 4: freq}
        :param stopwords: path to the stop-word list
        :param userdict: path to the keyword list (user dictionary)
        :param orient: {True: the returned DTM only keeps words from the keyword list, False: the returned DTM keeps all words}
        :return: a word-vector sample that can be used directly
"""
self.userdict = userdict
self.orient = orient
self.stopwords = stopwords
jieba.load_userdict(self.userdict) # 载入关键词词典
tf = tf.copy() # 防止对函数之外的原样本框造成改动
print('切词中,请稍候……')
rule = re.compile(u'[^\u4e00-\u9fa5]') # 清洗所有样本,只保留汉字
for i in range(0, tf.shape[0]):
try:
tf.iloc[i, 2] = rule.sub('', tf.iloc[i, 2])
except TypeError:
print('样本清洗Error: doc_id = ' + str(i))
continue
if self.stopwords is not None:
stopwords = txt_to_list(self.stopwords) # 载入停用词表
else:
stopwords = []
# 开始切词
words = []
items = range(0, len(tf))
with alive_bar(len(items), force_tty=True, bar='circles') as bar:
for i, row in tf.iterrows():
item = row['正文']
result = jieba.cut(item)
# 同时过滤停用词
word = ''
for element in result:
if element not in stopwords:
if element != '\t':
word += element
word += " "
words.append(word)
bar()
# CountVectorizer() 可以自动完成词频统计,通过fit_transform生成文本向量和词袋库
# 如果需要换成 tfidfVectorizer, 把下面三行修改一下就可以了
vect = CountVectorizer()
X = vect.fit_transform(words)
self.vectorizer = vect
matrix = X
X = X.toarray()
# 二维ndarray可以展示在pycharm里,但是和DataFrame性质完全不同
# ndarray 没有 index 和 column
features = vect.get_feature_names()
XX = pd.DataFrame(X, index=tf['id'], columns=features)
self.DTM0 = matrix
self.DTM = XX
self.features = features
# # 下面是之前走的弯路,不足一哂
# words_bag = vect.vocabulary_
# # 字典的转置(注意只适用于vk一一对应的情况,1v多k请参考setdefault)
# bag_words = dict((v, k) for k, v in words_bag.items())
#
# # 字典元素的排列顺序不等于字典元素值的排列顺序
# lst = []
# for i in range(0, len(XX.columns)):
# lst.append(bag_words[i])
# XX.columns = lst
if orient:
dict_filter = txt_to_list(self.userdict)
for word in features:
if word not in dict_filter:
XX.drop([word], axis=1, inplace=True)
self.DTM_key = XX
def get_feature_names(self):
return self.features
def strip_non_keywords(self, df):
ff = df.copy()
dict_filter = txt_to_list(self.userdict)
for word in self.features:
if word not in dict_filter:
ff.drop([word], axis=1, inplace=True)
return ff
def make_doc_freq(word, doc):
"""
    :param word: the keyword whose frequency is being counted
    :param doc: the text to scan
    :return: a dict recording how often the keyword occurs in the text and the surrounding context of each occurrence
"""
# 使用正则表达式进行匹配, 拼接成pattern
# re.S表示会自动换行
# finditer是findall的迭代器版本,通过遍历可以依次打印出子串所在的位置
it = re.finditer(word, doc, re.S)
# match.group()可以返回子串,match.span()可以返回索引
lst = []
for match in it:
lst.append(match.span())
freq = dict()
freq['Frequency'] = len(lst)
# 将上下文结果也整理为一个字典
context = dict()
for i in range(0, len(lst)):
# 将span的范围前后各扩展不多于10个字符,得到上下文
try:
# 为了划出适宜的前后文范围,需要设定索引的最大值和最小值
# 因此要比较span+10和doc极大值,span-10和doc极小值
# 最大值在两者间取小,最小值在两者间取大
MAX = min(lst[i][1] + 10, len(doc))
MIN = max(0, lst[i][0] - 10)
# 取得上下文
context[str(i)] = doc[MIN: MAX]
except IndexError:
print('IndexError: ' + word)
freq['Context'] = context
return freq
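# Illustrative usage sketch (editor's addition, not part of the original module).
# The keyword and sample text are invented; only make_doc_freq above is used.
def _example_make_doc_freq():
    doc = '监管要求银行加强风险管理。银行应当定期报送风险管理报告。'
    freq = make_doc_freq('风险管理', doc)
    print(freq['Frequency'])     # 2 occurrences of the keyword
    print(freq['Context']['0'])  # first match with up to 10 characters of context on each side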
def make_info_freq(name, pattern, doc):
"""
    :param name: the form (pattern name) whose frequency is being counted
    :param pattern: the regular expression used for the counting
    :param doc: the text to scan
    :return: a dict recording how often the pattern occurs in the text and the surrounding context of each occurrence
    Note: in the returned dict each Context entry is a tuple: (matched keyword, context)
"""
# 使用正则表达式进行匹配, 拼接成pattern
# re.S表示会自动换行
# finditer是findall的迭代器版本,通过遍历可以依次打印出子串所在的位置
it = re.finditer(pattern[0], doc, re.S)
# match.group()可以返回子串,match.span()可以返回索引
cls = pattern[1]
lst = []
for match in it:
lst.append(match.span())
freq = dict()
freq['Frequency'] = len(lst)
freq['Name'] = name
# 将上下文结果也整理为一个字典
context = dict()
for i in range(0, len(lst)):
# 将span的范围前后各扩展不多于10个字符,得到上下文
try:
# 为了划出适宜的前后文范围,需要设定索引的最大值和最小值
# 因此要比较span+10和doc极大值,span-10和doc极小值
# 最大值在两者间取小,最小值在两者间取大
MAX = min(lst[i][1] + 10, len(doc))
MIN = max(0, lst[i][0] - 10)
# 取得匹配到的关键词,并做掐头去尾处理
word = match_cut(doc[lst[i][0]: lst[i][1]], cls)
# 将关键词和上下文打包,存储到 context 条目中
context[str(i)] = (word, doc[MIN: MAX])
except IndexError:
print('IndexError: ' + name)
freq['Context'] = context
return freq
def make_docs_freq(word, docs):
"""
    :param word: the keyword whose frequency is being counted
    :param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column (iloc: 0), a body-text column (iloc: 2) and a reserved frequency column (iloc: 4)
    :return: a dict holding the per-document ("single keyword - single text") frequency dicts plus the aggregated counts
"""
freq = dict()
# 因为总频数是通过"+="的方式计算,不是简单赋值,所以要预设为0
freq['Total Frequency'] = 0
docs = docs.copy() # 防止对函数之外的原样本框造成改动
for i in range(0, len(docs)):
# 对于每个文档,都形成一个字典,字典包括关键词在该文档出现的频数和上下文
# id需要在第0列,正文需要在第2列
freq['Doc' + str(docs.iloc[i, 0])] = make_doc_freq(word, docs.iloc[i, 2])
# 在给每个文档形成字典的同时,对于总概率进行滚动加总
freq['Total Frequency'] += freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
docs.iloc[i, 4] = freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
# 接下来建立一个DFC(doc-freq-context)统计面板,汇总所有文档对应的词频数和上下文
# 首先构建(id, freq)的字典映射
xs = docs['id']
ys = docs['freq']
# zip(迭代器)是一个很好用的方法,建议多用
id_freq = {x: y for x, y in zip(xs, ys)}
# 新建一个空壳DataFrame,接下来把数据一条一条粘贴进去
data = pd.DataFrame(columns=['id', 'freq', 'word', 'num', 'context'])
for item in xs:
doc = freq['Doc' + str(item)]
num = doc['Frequency']
context = doc['Context']
for i in range(0, num):
strip = {'id': item, 'freq': id_freq[item], 'word': word, 'num': i, 'context': context[str(i)]}
# 默认orient参数等于columns
# 如果字典的值是标量,那就必须传递一个index,这是规定
strip = pd.DataFrame(strip, index=[None])
# df的append方法只能通过重新赋值来进行修改
data = data.append(strip)
data.set_index(['id', 'freq', 'word'], drop=True, inplace=True)
freq['DFC'] = data
return freq
def make_infos_freq(name, pattern, docs):
"""
    :param name: the form (pattern name) whose frequency is being counted
    :param pattern: the (regular expression, trimming method) pair used for the counting
    :param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column (iloc: 0) and a body-text column (iloc: 2)
    :return: a dict holding the per-document ("single keyword - single text") frequency dicts plus the aggregated counts
"""
freq = dict()
# 因为总频数是通过"+="的方式计算,不是简单赋值,所以要预设为0
freq['Total Frequency'] = 0
docs = docs.copy() # 防止对函数之外的原样本框造成改动
items = range(0, len(docs))
with alive_bar(len(items), force_tty=True, bar='circles') as bar:
for i in items:
# 对于每个文档,都形成一个字典,字典包括关键词在该文档出现的频数和上下文
# id需要在第0列,正文需要在第2列
# pattern 要全须全尾地传递进去,因为make_info_freq两个参数都要用
freq['Doc' + str(docs.iloc[i, 0])] = make_info_freq(name, pattern, docs.iloc[i, 2])
# 在给每个文档形成字典的同时,对于总概率进行滚动加总
freq['Total Frequency'] += freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
docs.iloc[i, 4] = freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
bar()
# 接下来建立一个DFC(doc-freq-context)统计面板,汇总所有文档对应的词频数和上下文
# 首先构建(id, freq)的字典映射
xs = docs['id']
ys = docs['freq']
# zip(迭代器)是一个很好用的方法,建议多用
id_freq = {x: y for x, y in zip(xs, ys)}
# 新建一个空壳DataFrame,接下来把数据一条一条粘贴进去
data = pd.DataFrame(columns=['id', 'freq', 'form', 'word', 'num', 'context'])
for item in xs:
doc = freq['Doc' + str(item)]
num = doc['Frequency']
# 从(关键词,上下文)中取出两个元素
context = doc['Context']
for i in range(0, num):
# context 中的关键词已经 match_cut 完毕,不需要重复处理
strip = {'id': item, 'form': name, 'freq': id_freq[item], 'word': context[str(i)][0],
'num': i, 'context': context[str(i)][1]}
# 默认orient参数等于columns
# 如果字典的值是标量,那就必须传递一个index,这是规定
strip = pd.DataFrame(strip, index=[None])
# df的append方法只能通过重新赋值来进行修改
data = data.append(strip)
data.set_index(['id', 'freq', 'form', 'word'], drop=True, inplace=True)
freq['DFC'] = data
print(name + ' Completed')
return freq
def words_docs_freq(words, docs):
"""
    :param words: the list of keywords whose frequencies are being counted
    :param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column, a body-text column and a frequency column
    :return: a dict holding the "single keyword - multiple texts" frequency dicts, plus the final DFC (doc-frequency-context) and DTM (doc-term matrix)
"""
freqs = dict()
# 与此同时新建一个空壳DataFrame,用于汇总DFC
data = pd.DataFrame()
# 新建一个空壳,用于汇总DTM(Doc-Term-Matrix)
dtm = pd.DataFrame(None, columns=words, index=docs['id'])
# 来吧,一个循环搞定所有
items = range(len(words))
with alive_bar(len(items), force_tty=True, bar='blocks') as bar:
for word in words:
freq = make_docs_freq(word, docs)
freqs[word] = freq
data = data.append(freq['DFC'])
for item in docs['id']:
dtm.loc[item, word] = freq['Doc' + str(item)]['Frequency']
bar()
# 记得要sort一下,不然排序的方式不对(应该按照doc id来排列)
data.sort_index(inplace=True)
freqs['DFC'] = data
freqs['DTM'] = dtm
return freqs
def infos_docs_freq(infos, docs):
"""
    :param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column and a body-text column
    :param infos: the regular expressions, given as a dict whose keys are examples such as "(1)" and whose values are patterns such as "([0-9])"
    :return: a dict holding the "single keyword - multiple texts" frequency dicts, plus the final DFC (doc-frequency-context) and DTM (doc-term matrix)
"""
freqs = dict()
# 与此同时新建一个空壳DataFrame,用于汇总DFC
data = pd.DataFrame()
# 新建一个空壳,用于汇总DTM(Doc-Term-Matrix)
dtm = pd.DataFrame(None, columns=list(infos.keys()), index=docs['id'])
# 来吧,一个循环搞定所有
items = range(len(infos))
with alive_bar(len(items), force_tty=True, bar='blocks') as bar:
for k, v in infos.items():
freq = make_infos_freq(k, v, docs)
freqs[k] = freq
data = data.append(freq['DFC'])
for item in docs['id']:
dtm.loc[item, k] = freq['Doc' + str(item)]['Frequency']
bar()
# 记得要sort一下,不然排序的方式不对(应该按照doc id来排列)
data.sort_index(inplace=True)
freqs['DFC'] = data
freqs['DTM'] = dtm
return freqs
def massive_pop(infos, doc):
"""
    :param infos: list of regular expressions matching the content to delete
    :param doc: the body text
    :return: the text with all matches removed
"""
for info in infos:
doc = re.sub(info, '', doc)
return doc
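# Illustrative usage sketch (editor's addition, not part of the original module).
def _example_massive_pop():
    # Strip digits and Latin letters from a sample headline.
    print(massive_pop([r'\d+', r'[A-Za-z]+'], '2021年ABC银行发布公告'))  # -> '年银行发布公告'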
def massive_sub(infos, doc):
"""
    :param infos: dict mapping the regular expressions of the content to replace to their replacement strings
    :param doc: the body text
    :return: the text with all substitutions applied
"""
    for v, k in infos.items():
doc = re.sub(v, k, doc)
return doc
# Next, take the first n sentences of each sample (or everything if it has fewer than n sentences) and repeat the comparison
# The idea behind "first ten sentences" is to count sentence-ending marks such as !, ? and 。 and stop at the tenth
def top_n_sent(n, doc, percentile=1):
"""
    :param n: the number of sentences; the function returns the first n sentences of a text, or the full text if it has no more than n sentences
    :param doc: the body text
    :param percentile: when selecting sentences by quantile, the fraction to keep; e.g. with ten sentences a 50% percentile keeps five
                       (the count is rounded up with math.ceil, so eleven sentences at 50% keep six)
    :return: a string with the first n sentences
"""
info = '[。?!]'
# 在这个函数体内,函数主体语句的作用域大于循环体,因此循环内的变量相当于局部变量
# 因此想在循环外直接返回,就会出现没有定义的错误,因此可以做一个全局声明
# 但是不建议这样做,因为如果函数外有一个变量恰巧和局部变量重名,那函数外的变量也会被改变
# 因此还是推荐多使用迭代器,把循环包裹成迭代器,可以解决很多问题
# 而且已经封装好的迭代器,例如re.findall_iter,就不用另外再去写了,调用起来很方便
# 如下,第一行代码的作用是用列表包裹迭代器,形成一个生成器的列表
# 每个生成器都存在自己的 Attribute
re_iter = list(re.finditer(info, doc))
# max_iter 是 re 匹配到的最大次数
max_iter = len(re_iter)
# 这一句表示,正文过于简短,或者没有标点,此时直接输出全文
if max_iter == 0:
return doc
# 考虑 percentile 的情况,如果总共有11句,就舍弃掉原来的 n,直接改为总句数的 percentile 对应的句子数
# 注意是向下取整
if percentile != 1:
n = math.ceil(percentile * max_iter)
    # If at least n sentences were matched, return the first n of them
    if max_iter >= n > 0:
        return doc[0: re_iter[n - 1].end()]
    # If the text is very short or the percentile too low to yield even one sentence, return the first sentence
    elif n == 0:
        return doc[0: re_iter[0].end()]
    # If fewer than n sentences were matched, return everything up to the last sentence end
    else:
        return doc[0: re_iter[-1].end()]
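# Illustrative usage sketch (editor's addition, not part of the original module).
def _example_top_n_sent():
    doc = '第一句。第二句!第三句?第四句。'
    print(top_n_sent(2, doc))                   # first two sentences
    print(top_n_sent(4, doc, percentile=0.5))   # 50% of four sentences -> first two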
# To reduce the chance of name clashes, keep the number of variables used inside function bodies to a minimum
def dtm_sort_filter(dtm, keymap, name=None):
"""
    :param dtm: the Doc-Term-Matrix produced earlier
    :param keymap: dict mapping each category to its list of keywords
    :param name: name of the Excel file to generate (including the extension)
    :return: a dict with two pandas objects: a binary table showing whether each category is present, and the final per-document category count
"""
dtm = dtm.applymap(lambda x: 1 if x != 0 else 0)
strips = {}
for i, row in dtm.iterrows():
strip = {}
for k, v in keymap.items():
strip[k] = 0
for item in v:
try:
strip[k] += row[item]
except KeyError:
pass
strips[i] = strip
dtm_class = pd.DataFrame.from_dict(strips, orient='index')
dtm_class = dtm_class.applymap(lambda x: 1 if x != 0 else 0)
dtm_final = dtm_class.agg(np.sum, axis=1)
result = {'DTM_class': dtm_class, 'DTM_final': dtm_final}
return result
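# Illustrative usage sketch (editor's addition, not part of the original module).
# The category names and keywords are invented purely for demonstration.
def _example_dtm_sort_filter():
    dtm = pd.DataFrame({'存款': [3, 0], '贷款': [0, 2], '理财': [1, 0]}, index=['doc1', 'doc2'])
    keymap = {'传统业务': ['存款', '贷款'], '资管业务': ['理财']}
    result = dtm_sort_filter(dtm, keymap)
    print(result['DTM_final'])  # doc1 touches 2 categories, doc2 touches 1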
def dtm_point_giver(dtm, keymap, scoremap, name=None):
"""
    :param dtm: the Doc-Term-Matrix produced earlier
    :param keymap: dict, {TypeA: [word1, word2, word3, ...], TypeB: ...}
    :param scoremap: dict mapping each category to its score
    :param name: name of the Excel file to generate (including the extension)
    :return: a pandas Series indexed by document id with each document's score (the highest score among its matched categories)
"""
dtm = dtm.applymap(lambda x: 1 if x != 0 else 0)
# 非 keymap 中词会被过滤掉
strips = {}
for i, row in dtm.iterrows():
strip = {}
for k, v in keymap.items():
strip[k] = 0
for item in v:
try:
strip[k] += row[item]
except KeyError:
pass
strips[i] = strip
dtm_class = pd.DataFrame.from_dict(strips, orient='index')
dtm_class = dtm_class.applymap(lambda x: 1 if x != 0 else 0)
# 找到 columns 对应的分值
keywords = list(dtm_class.columns)
multiplier = []
for keyword in keywords:
multiplier.append(scoremap[keyword])
# DataFrame 的乘法运算,不会改变其 index 和 columns
dtm_score = dtm_class.mul(multiplier, axis=1)
# 取一个最大值来赋分
dtm_score = dtm_score.agg(np.max, axis=1)
return dtm_score
def dfc_sort_filter(dfc, keymap, name=None):
"""
    :param dfc: the Doc-Frequency-Context detail table produced earlier
    :param keymap: dict mapping each keyword to the category it belongs to
    :param name: name of the Excel file to generate (including the extension)
    :return: a pandas object with the document id and the number of business categories that document contains
"""
# 接下来把关键词从 dfc 的 Multi-index 中拿出来(这个index本质上就是一个ndarray)
# 拿出来关键词就可以用字典进行映射
# 先新建一列class-id,准备放置映射的结果
dfc.insert(0, 'cls-id', None)
# 开始遍历
for i in range(0, len(dfc.index)):
dfc.iloc[i, 0] = keymap[dfc.index[i][2]]
# 理论上就可以直接通过 excel 的分类计数功能来看业务种类数了
# 失败了,excel不能看种类数,只能给所有值做计数,因此还需要借助python的unique语句
# dfc.to_excel('被监管业务统计.xlsx')
# 可以对于每一种index做一个计数,使用loc索引到的对象是一个DataFrame
# 先拿到一个doc id的列表
did = []
for item in dfc.index.unique():
did.append(item[0])
did = list( | pd.Series(did) | pandas.Series |
import os
import logging
import numpy as np
import pandas as pd
from astropy import units as u
from astropy.convolution import convolve_fft, Gaussian2DKernel, convolve
from astropy.coordinates import SkyCoord, Angle
from astropy.io import fits
from astropy.table import Table
from regions import CircleSkyRegion
import dill as pickle # Using dill allows candidate loggers to be pickled.
from scipy.interpolate import CubicSpline
import src.globals as glo
#TODO: causes circular import
# from src.identify.richness import richness
# from src.identify.targets import write_sources, write_col_cat
from src.utils import Str, circle_skyregions_mask
from memory_profiler import profile
def calculate_col_dist(z, col, models, c=None, cat=None):
"""
Parameters
----------
col : {'gr', 'ri', 'iz'}
Colour.
z : float
Redshift.
    models : pandas.DataFrame
        Models of the red-sequence normalisation and slope as a function of
        redshift and m* for each colour.
    c : Candidate, optional
        Galaxy cluster candidate; if given, its cleaned combined catalogue
        is used instead of `cat`.
    cat : Table, optional
        Source catalogue; required when no candidate is given.
Returns
-------
col_dist : array-like
Colour distance array.
"""
msg = 'Must provide either a cluster candidate or catalog.'
assert (c is not None or cat is not None), msg
if c is not None and cat is None:
cat = c.file_calibrated_combined.clean_cat
config = glo.settings.loc[col, :]
# Modify the detmodel magnitudes within great circle arc distances less
# than 2500 arcseconds (i.e. those in the subcatalog).
col_mag_detmodel = cat[f'MAG_DETMODEL_{col:u}'] + config.loc['CORRECTION_MAG_DETMODEL']
# The as_matrix() method converts each pandas.series to a np.array.
z_model = models['REDSHIFT'].as_matrix()
norm_model = models[f'NORMALISATION_{col:u}'].as_matrix()
slope_model = models[f'SLOPE_{col:u}'].as_matrix()
# Determine idx corresponding to the the redshift step in the red sequence
# model data that is most similar redshift of the candidate.
idx_model = np.argmin(np.absolute(z_model - z))
# Determine the col distance from the red sequence.
# Imagine col (y-axis) vs magnitude (x-axis) with y = mx + c
mag_auto_i = cat['MAG_AUTO_I']
col_model = (slope_model[idx_model] * mag_auto_i) + norm_model[idx_model]
col_dist = np.array(col_mag_detmodel - col_model)
return col_dist
def calculate_weights(c, z, col, name_models="rs_norm_slope"):
"""
The weights can be seen as filter functions that follow the expected
behaviour of cluster galaxies in color, magnitude and angular space and
therefore maximize the chance to detect the cluster at the correct
redshift :cite:`Klein2017aa`.
Interpolation is a method of constructing new data points within the
range of a discrete set of known data points. We can apply this to our
width files using the discrete values of the i magnitude bins and then
construct new data points for the i mag auto values which were
produced by SExtractor. This provides a way to estimate the intrinsic
col scatter.
To calculate the weights for each col a Gaussian PDF is implemented.
The PDF for scipy.stats.norm is: :math:`f(x) = e^{-x^2 / 2}/ \sqrt{2\pi}`
This means you also need to also divide the standard deviation to get
it into the correct form. location (loc) is the mean of the
distribution and scale is the same as standard deviation.
TODO: This normalisation comes from the distribution you expect and
being able to integrate over the richness.
We normalize the weights by N(sigmac1(z),sigmac2(z),sigmac3(z)), which is
the # average weight of a population of galaxies that follows the expected
distribution of a cluster at the investigated redshift.
Parameters
----------
col : {'gr', 'ri', 'iz'}
Colour.
z : float
Redshift.
cat :
name_models : str, optional
File name containing the models of the red-sequence m*, normalisation
and slope as a function of redshift and i-band magnitude for each
colour. This must correspond to a file that resides in the
os.path.join('src', 'models') directory.
c : Candidate
Galaxy cluster candidate.
check :
Returns
----------
References
----------
"""
# Load variables from red sequence models Tables are also numpy arrays.
path_models = os.path.join(glo.DIR_MODELS, name_models)
models = | pd.read_table(path_models, delim_whitespace=True, header=0) | pandas.read_table |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skimage import io, filters, feature
import sys
from bisect import bisect_left
import time as time
from tqdm.auto import tqdm
# -------------------------------
# Functions
def apply_gaussian_filter(fluxes, sigma):
return filters.gaussian(image=fluxes, sigma=sigma)
def closest(myList, myNumber):
"""
Assumes myList is sorted. Returns closest value to myNumber.
If two numbers are equally close, return the smallest number.
"""
pos = bisect_left(myList, myNumber)
if pos == 0: return myList[0]
if pos == len(myList): return myList[-1]
before = myList[pos - 1]
after = myList[pos]
if after - myNumber < myNumber - before: return after
else: return before
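# Illustrative usage sketch (editor's addition, not part of the original module).
def _example_closest():
    print(closest([100, 200, 300], 240))  # -> 200 (nearer to 200 than to 300)
    print(closest([100, 200, 300], 250))  # -> 200 (on a tie the smaller value wins)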
def find_midpoint(flux_interval, wavelength_interval, gradient_interval, spectralline_wavelength):
"""
Function that returns the closest critical point to the spectral line. If none is found, it returns a NaN.
"""
criticalpoints = []
for i in range(len(flux_interval)-1):
# Check if the derivative changes sign between i and i+1
if (gradient_interval[i] < 0) != (gradient_interval[i + 1] < 0):
criticalpoints.append(wavelength_interval[i])
if len(criticalpoints) == 0:
return np.nan
elif len(criticalpoints) == 1:
return criticalpoints[0]
else:
return closest(criticalpoints, spectralline_wavelength)
def find_criticalpoint(flux_interval, wavelength_interval, gradient_interval):
"""
Function that returns the first critical point it finds in the interval. If none is found, it returns a NaN.
"""
for i in range(len(flux_interval)-1):
# Check if the derivative changes sign between i and i+1
if (gradient_interval[i] < 0) != (gradient_interval[i + 1] < 0):
wavelength_critical = wavelength_interval[i]
return wavelength_critical
return np.nan
def find_criticalpoint_extended(flux_interval, wavelength_interval, gradient_interval):
"""
Function that returns the first critical point it finds in the interval. If none is found, it returns a NaN.
It has a delayed start, which is useful for finding the MgII spectral line.
"""
for i in range(30, len(flux_interval)-1):
# Check if the derivative changes sign between i and i+1
if (gradient_interval[i] < 0) != (gradient_interval[i + 1] < 0):
wavelength_critical = wavelength_interval[i]
return wavelength_critical
return np.nan
# -------------------------------
# Useful website: http://www.stat.cmu.edu/tr/tr828/tr828.pdf
# The spectral lines
qso_lines = ['MgII_em', 'Hgamma_em', 'Hbeta_em', 'OIII_em', 'OIII_em', 'Halpha_em', 'OII_em']
star_lines = ['Halpha_ab', 'Hbeta_ab', 'Hgamma_ab', 'Hdelta_ab', 'NaI_ab']
gal_lines = ['Na_ab', 'Mg_ab', 'Halpha_em', 'S2_em', 'Hbeta_em', 'Gband_ab', 'CAIIH_ab', 'CAIIK_ab', 'OII_em']#, 'balmer_break']
wl_qso = [2799, 4342, 4861, 4960, 5008, 6565, 3727]
wl_star = [6565, 4861, 4340, 4101, 5896]
wl_gal = [5893, 5175, 6565, 6716, 4861, 4304, 3933.7, 3968, 3727] #, 4000]
# Complete spectral lines
speclines_name = ['MgII_em', 'Hgamma_em', 'Hbeta_em', 'OIII_em', 'OIII_em', 'Halpha_em', 'OII_em','Hdelta_ab',
'NaI_ab', 'Mg_ab', 'S2_em', 'Gband_ab', 'CAIIH_ab', 'CAIIK_ab']
speclines = [2799, 4342, 4861, 4960, 5008, 6565, 3727, 4101, 5895, 5175, 6716, 4304, 3933.7, 3968]
# Sort lists on wavelengths
sort_index = np.argsort(speclines)
speclines = np.array(speclines)[sort_index]
speclines_name = np.array(speclines_name)[sort_index]
def continuum(X, slope, intercept):
return slope * X + intercept
def spectrallines_1source(flux, wavelength, z, sigma=4, delta1=10, delta2=80):
"""
:param flux: Array of flux values of 1 source.
:param wavelength: Array of wavelength values of 1 source.
:param z: Redshift of the source.
:param sigma: Smoothing parameter (default is 4).
    :param delta1: Interval in which to look for the exact midpoint of the peak (default is 10).
:param delta2: Interval in which to look for the begin and end points of the peak (default is 80).
:return: Vector with Pseudo-Equivalent Widths (EW) for each spectral line (for 1 source).
"""
# Smooth the flux and compute its gradient
smoothflux = apply_gaussian_filter(flux, sigma=sigma)
gradient = np.gradient(smoothflux, wavelength)
# The spectral lines EW will be saved in this list
final_vector = []
for s in range(len(speclines)):
# -------- Step 1: find the exact midpoint of spectral peak --------
# Look for the critical points within an interval of delta around the predicted peaks.
line_min = speclines[s] * (1 + z) - delta1
line_max = speclines[s] * (1 + z) + delta1
interval_flux = smoothflux[(line_min < wavelength) & (wavelength < line_max)]
interval_wavelength = np.array(wavelength[(line_min < wavelength) & (wavelength < line_max)])
interval_gradient = gradient[(line_min < wavelength) & (wavelength < line_max)]
# If the spectral line is outside of the wavelength range: EW = 0
if len(interval_flux) == 0.0:
EW = 0.0
final_vector.append(EW)
continue
# Find the exact midpoint in a small interval
wavelength_mid = find_midpoint(interval_flux, interval_wavelength, interval_gradient, speclines[s] * (1 + z))
# If still no critical point is found: use location of spectral line
if np.isnan(wavelength_mid):
wavelength_mid = speclines[s] * (1+z)
# -------- Step 2: find the begin and end points --------
# Define the intervals to look at
end_right = wavelength_mid - delta2
end_left = wavelength_mid + delta2
interval_r_flux = np.flip(smoothflux[(end_right < wavelength) & (wavelength < wavelength_mid)])
interval_r_wavelength = np.flip(np.array(wavelength[(end_right < wavelength) & (wavelength < wavelength_mid)]))
interval_r_gradient = np.flip(gradient[(end_right < wavelength) & (wavelength < wavelength_mid)])
interval_l_flux = smoothflux[(wavelength_mid < wavelength) & (wavelength < end_left)]
interval_l_wavelength = np.array(wavelength[(wavelength_mid < wavelength) & (wavelength < end_left)])
interval_l_gradient = gradient[(wavelength_mid < wavelength) & (wavelength < end_left)]
# Find start point
if s == 0: # for MgII: use different function, that ignores the first critical point
wavelength_start = find_criticalpoint_extended(interval_r_flux, interval_r_wavelength, interval_r_gradient)
else:
wavelength_start = find_criticalpoint(interval_r_flux, interval_r_wavelength, interval_r_gradient)
if len(interval_r_wavelength) == 0: # If there are no points to right: use first point of interval
wavelength_start = interval_wavelength[0]
# Find end point
if s == 0: # for MgII: use different function, that ignores the first critical point
wavelength_end = find_criticalpoint_extended(interval_l_flux, interval_l_wavelength, interval_l_gradient)
elif len(interval_l_wavelength) == 0: # If there are no points to left: use last point of interval
wavelength_end = interval_wavelength[-1]
else:
wavelength_end = find_criticalpoint(interval_l_flux, interval_l_wavelength, interval_l_gradient)
# If no critical points are found in the interval:
if np.isnan(wavelength_start):
if not np.isnan(wavelength_end): # Critical point found for end point: mirror that distance
wavelength_start = closest(np.flip(interval_r_wavelength), wavelength_mid - (wavelength_end - wavelength_mid))
else: # None found: take point closest to end of interval
wavelength_start = closest(np.flip(interval_r_wavelength), end_right)
if np.isnan(wavelength_end):
if not np.isnan(wavelength_start): # Critical point found for start point: mirror that distance
wavelength_end = closest(interval_l_wavelength, wavelength_mid + (wavelength_mid - wavelength_start))
else: # None found: take point closest to end of interval
wavelength_end = closest(interval_l_wavelength, end_left)
# Get corresponding indices of the start and end points
index_start = list(wavelength).index(wavelength_start)
index_end = list(wavelength).index(wavelength_end)
# -------- Step 3: Make continuum --------
# Connect the start and end point by a straight line. --> y = a x + b
if wavelength_end == wavelength_start:
slope = 0.0
else:
slope = (smoothflux[index_end] - smoothflux[index_start]) / (wavelength_end - wavelength_start)
intercept = smoothflux[index_start] - slope * wavelength_start
#test_wavelength = np.linspace(wavelength_start, wavelength_end, 100)
#test_continuum = continuum(test_wavelength)
# -------- Step 4: Compute Pseudo-Equivalent Widths (EW) --------
# Define the interval to look at: all points between start and end point of spectral line
EWinterval_flux = smoothflux[(wavelength_start < wavelength) & (wavelength < wavelength_end)]
EWinterval_wavelength = np.array(wavelength[(wavelength_start < wavelength) & (wavelength < wavelength_end)])
EWinterval_continuum = continuum(EWinterval_wavelength, slope, intercept)
if len(EWinterval_wavelength) == 0 or len(EWinterval_wavelength) == 1 or np.any(EWinterval_continuum == 0.0):
# No points? EW = 0
EW = 0.0
else:
# Make an array of delta_wavelength. This is the width of the bars.
Delta_wavelength = np.append(np.diff(EWinterval_wavelength), np.diff(EWinterval_wavelength)[-1])
# Obtain the area by multiplying the height ( = flux - continuum) by the width
EW = np.sum((EWinterval_flux - EWinterval_continuum) / EWinterval_continuum * Delta_wavelength)
# Add the found EW to the vector
final_vector.append(EW)
return final_vector
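# Illustrative usage sketch (editor's addition, not part of the original module).
# The flux is synthetic noise, so the equivalent widths only illustrate the
# shape of the output: one value per entry in `speclines`.
def _example_spectrallines_1source():
    wave = np.linspace(3800, 9200, 3000)
    flux = 1.0 + 0.01 * np.random.randn(3000)
    widths = spectrallines_1source(flux, wave, z=0.05)
    print(len(widths))  # 14, matching len(speclines)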
# -------------------------------
# Load the data and extract the important columns
# spectra = pd.read_pickle('../data/sdss/FinalTable_Nikki.pkl')
# -------------------------------
# Compute the spectral line vectors for all the data
def get_spectrallines(df_fluxes, df_source_info, df_wavelengths, from_sp, to_sp, save):
"""
get_spectrallines()
Takes a fluxes from DataFrame with spectra, and computes the area under curve at spectral lines
to get a magnitude for each spectral line of interest (hard coded list of areas of interest)
Parameters
----------
df_fluxes : pandas.DataFrame
A table containing only fluxes and the corresponding objid as the first column
df_source_info : pandas.DataFrame
table containing all additional data about a source
df_wavelengths : pandas.DataFrame
table containing only a list of wavelengths that all sources share.
from_sp : int
The index of spectrum from which the spectral lines are calculated. Only used for the filename
at saving
to_sp : int
The index of spectrum to which the spectral lines are calculated. Only used for the filename
at saving
save: When True, saves the resulting DataFrame
When False, doesn't save the DataFrame
Returns
-------
df : pd.DataFrame
A pandas DataFrame with 2 columns.
columns: 'spectral_lines',
'objid'
"""
fluxes = np.delete(df_fluxes.values, 0, axis=1) # remove objids
print(f'fluxes = {fluxes}')
wavelengths = df_wavelengths.values.flatten()
objid = df_source_info['objid'].to_numpy()
z = df_source_info['z'].to_numpy()
# Create lists for the 2 columns: the spectral lines vector and the objID list
speclines_vector = []
speclines_objid = []
# Loop over all the sources in the data file: get for each one the vector with spectral lines
m = 0
for n in tqdm(range(len(df_source_info)), desc='Computing Spectral Lines: '):
try:
vector = spectrallines_1source(np.array(fluxes[n]), np.array(wavelengths), z[n])
speclines_vector.append(vector)
speclines_objid.append(objid[n])
except:
m += 1
# print("Something went wrong with the spectral lines! At iteration ", n)
speclines_vector.append([np.nan] * 14)
speclines_objid.append(objid[n])
# Merge the two columns together in a data frame
df = {}
df['objid'] = speclines_objid
df['spectral_lines'] = speclines_vector
#df['class'] = specclass
df = | pd.DataFrame(df) | pandas.DataFrame |
"""
Copyright (c) 2021, Stanford Neuromuscular Biomechanics Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import pandas as pd
import tensorflow as tf
from datapreprocessing import selectFeats
from model import run_model
def losocv_split(subjectIDs):
"""Create leave-one-subject-out cross-validation train/test splits.
Args:
subjectIDs (list): subjectID corresponding to each example.
Returns:
splits (list of lists): each fold's train and test indices.
subjectIDset (list): unique IDs, in held-out-test-set order
"""
subjectIDset = list(set(subjectIDs))
splits = []
for subject in subjectIDset:
test_idx = [i for i in range(len(subjectIDs)) if subjectIDs[i]==subject]
train_idx = [i for i in range(len(subjectIDs)) if subjectIDs[i]!=subject]
splits.append([train_idx,test_idx])
return splits, subjectIDset
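# Illustrative usage sketch (editor's addition, not part of the original module).
def _example_losocv_split():
    subject_ids = ['s1', 's1', 's2', 's2', 's3']
    splits, held_out = losocv_split(subject_ids)
    for (train_idx, test_idx), subject in zip(splits, held_out):
        print(subject, train_idx, test_idx)  # one fold per held-out subject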
def get_sample_weights(y, subjectIDs):
"""Computes sample weights such that each label for each subject
has equal weight. Subject's examples have weights totalling 1, and
each label within a subject totals to 0.5.
Args:
y (np array): label corresponding to each example.
subjectIDs (list): subjectID corresponding to each example.
Returns: sample_weights (np array): weight corresponding to each
example.
"""
subjects = list(set(subjectIDs))
sample_weights = np.empty_like(subjectIDs, dtype=float)
# loop through subjectIDs to count number of each label
for subject in subjects:
# get labels specific to subject
subj_idx = [i for i in range(len(subjectIDs))
if subjectIDs[i]==subject]
ysubj = y[subj_idx]
# compute weights for each subject (sum to 1)
subj_y_counts = np.zeros((2,)) # 2 for 2 classes
for i in range(2):
subj_y_counts[i] = np.count_nonzero(ysubj==i) # count number of each label
if subj_y_counts[i]==0:
raise Exception('subject missing a class example.') # missing subject example for 1+ classes
subj_weights = 1/(2*subj_y_counts)
# populate full sample weights matrix
subj_sample_weights = np.zeros_like(ysubj,dtype=float)
for i in range(len(ysubj)):
subj_sample_weights[i] = subj_weights[ysubj[i]]
sample_weights[subj_idx] = subj_sample_weights
return sample_weights
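# Illustrative usage sketch (editor's addition, not part of the original module).
# Each subject's weights sum to 1 and each class within a subject sums to 0.5.
def _example_get_sample_weights():
    y = np.array([0, 1, 1, 0, 0, 1])
    subject_ids = ['a', 'a', 'a', 'b', 'b', 'b']
    w = get_sample_weights(y, subject_ids)
    print(w, w[:3].sum(), w[3:].sum())  # both per-subject sums equal 1.0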
def create_dsets(data, train_idx, test_idx, batch_size=512):
"""Create tf train and test datasets for a single fold.
Args:
data (dict): holds all data matrices/lists.
train_idx (list): elements are bools. True for train data,
False for test data.
test_idx (list): elements are bools. True for test data,
False for train data.
batch_size (int): batch size for model training.
Returns:
train_dset (tf dataset): train_X, train_y, train_sample_weights
as components.
test_dset (tf dataset): test_X, test_y, test_sample_weights as
components.
"""
X = data['X']
y = data['y']
subjectIDs = np.array(data['subjectIDs'])
augment_idx = data['augment_idx']
train_X = X[train_idx]
train_y = y[train_idx]
train_subjectIDs = subjectIDs[train_idx]
test_X = X[test_idx]
test_y = y[test_idx]
test_subjectIDs = subjectIDs[test_idx]
test_augment_idx = augment_idx[test_idx]
# remove augmented examples from test set
non_augmented = (test_augment_idx==0)
test_X = test_X[non_augmented]
test_y = test_y[non_augmented]
test_subjectIDs = test_subjectIDs[non_augmented]
# get sample weights
train_sample_weights = get_sample_weights(train_y, train_subjectIDs)
test_sample_weights = get_sample_weights(test_y, test_subjectIDs)
train_dset = tf.data.Dataset.from_tensor_slices(
(train_X, train_y, train_sample_weights)).shuffle(
buffer_size=len(train_X),seed=0).batch(batch_size)
test_dset = tf.data.Dataset.from_tensor_slices(
(test_X, test_y, test_sample_weights)).shuffle(
buffer_size=len(test_X),seed=0).batch(batch_size)
return train_dset, test_dset
def train_models_losocv(data, IMUs, result_dir, batch_size, n_epoch,
verbose=False):
"""Perform leave-one-subject-out cross-validation for a sensor
set specified by arg IMUs.
Args:
data (dict): holds all data matrices/lists.
IMUs (list): individual sensors included in sensor set.
result_dir (str): directory in which results will reside.
batch_size (int): batch size for model training.
n_epoch (int): max number of epochs for model training.
verbose (bool): True to print test statements, False otherwise.
Returns:
labels (dict): columns include 'probas' (from model) and 'true'
(ground truth). One row for each fold.
"""
# select features of interest
data = selectFeats(data, IMUs)
subjectIDs = data['subjectIDs']
split, test_order = losocv_split(subjectIDs)
labels = | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) 2021 <NAME>. All rights reserved.
# This code is licensed under Apache 2.0 with Commons Clause license (see LICENSE.md for details)
"""Custom data classes that subclass `vectorbt.data.base.Data`."""
import time
import warnings
from functools import wraps
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from vectorbt import _typing as tp
from vectorbt.data.base import Data
from vectorbt.utils.config import merge_dicts, get_func_kwargs
from vectorbt.utils.datetime_ import (
get_utc_tz,
get_local_tz,
to_tzaware_datetime,
datetime_to_ms
)
try:
from binance.client import Client as ClientT
except ImportError:
ClientT = tp.Any
try:
from ccxt.base.exchange import Exchange as ExchangeT
except ImportError:
ExchangeT = tp.Any
class SyntheticData(Data):
"""`Data` for synthetically generated data."""
@classmethod
def generate_symbol(cls, symbol: tp.Label, index: tp.Index, **kwargs) -> tp.SeriesFrame:
"""Abstract method to generate a symbol."""
raise NotImplementedError
@classmethod
def download_symbol(cls,
symbol: tp.Label,
start: tp.DatetimeLike = 0,
end: tp.DatetimeLike = 'now',
freq: tp.Union[None, str, pd.DateOffset] = None,
date_range_kwargs: tp.KwargsLike = None,
**kwargs) -> tp.SeriesFrame:
"""Download the symbol.
Generates datetime index and passes it to `SyntheticData.generate_symbol` to fill
the Series/DataFrame with generated data."""
if date_range_kwargs is None:
date_range_kwargs = {}
index = pd.date_range(
start=to_tzaware_datetime(start, tz=get_utc_tz()),
end=to_tzaware_datetime(end, tz=get_utc_tz()),
freq=freq,
**date_range_kwargs
)
if len(index) == 0:
raise ValueError("Date range is empty")
return cls.generate_symbol(symbol, index, **kwargs)
def update_symbol(self, symbol: tp.Label, **kwargs) -> tp.SeriesFrame:
"""Update the symbol.
`**kwargs` will override keyword arguments passed to `SyntheticData.download_symbol`."""
download_kwargs = self.select_symbol_kwargs(symbol, self.download_kwargs)
download_kwargs['start'] = self.data[symbol].index[-1]
kwargs = merge_dicts(download_kwargs, kwargs)
return self.download_symbol(symbol, **kwargs)
def generate_gbm_paths(S0: float, mu: float, sigma: float, T: int, M: int, I: int,
seed: tp.Optional[int] = None) -> tp.Array2d:
"""Generate using Geometric Brownian Motion (GBM).
See https://stackoverflow.com/a/45036114/8141780."""
if seed is not None:
np.random.seed(seed)
dt = float(T) / M
paths = np.zeros((M + 1, I), np.float64)
paths[0] = S0
for t in range(1, M + 1):
rand = np.random.standard_normal(I)
paths[t] = paths[t - 1] * np.exp((mu - 0.5 * sigma ** 2) * dt + sigma * np.sqrt(dt) * rand)
return paths
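# Illustrative usage sketch (editor's addition, not part of the original module).
def _example_generate_gbm_paths():
    # 5 paths of 10 steps starting at 100, zero drift, 5% volatility per unit time.
    paths = generate_gbm_paths(S0=100., mu=0., sigma=0.05, T=10, M=10, I=5, seed=42)
    print(paths.shape)  # (M + 1, I) == (11, 5); row 0 holds the common start value S0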
class GBMData(SyntheticData):
"""`SyntheticData` for data generated using Geometric Brownian Motion (GBM).
Usage:
* See the example under `BinanceData`.
```pycon
>>> import vectorbt as vbt
>>> gbm_data = vbt.GBMData.download('GBM', start='2 hours ago', end='now', freq='1min', seed=42)
>>> gbm_data.get()
2021-05-02 14:14:15.182089+00:00 102.386605
2021-05-02 14:15:15.182089+00:00 101.554203
2021-05-02 14:16:15.182089+00:00 104.765771
... ...
2021-05-02 16:12:15.182089+00:00 51.614839
2021-05-02 16:13:15.182089+00:00 53.525376
2021-05-02 16:14:15.182089+00:00 55.615250
Freq: T, Length: 121, dtype: float64
>>> import time
>>> time.sleep(60)
>>> gbm_data = gbm_data.update()
>>> gbm_data.get()
2021-05-02 14:14:15.182089+00:00 102.386605
2021-05-02 14:15:15.182089+00:00 101.554203
2021-05-02 14:16:15.182089+00:00 104.765771
... ...
2021-05-02 16:13:15.182089+00:00 53.525376
2021-05-02 16:14:15.182089+00:00 51.082220
2021-05-02 16:15:15.182089+00:00 54.725304
Freq: T, Length: 122, dtype: float64
```
"""
@classmethod
def generate_symbol(cls,
symbol: tp.Label,
index: tp.Index,
S0: float = 100.,
mu: float = 0.,
sigma: float = 0.05,
T: tp.Optional[int] = None,
I: int = 1,
seed: tp.Optional[int] = None) -> tp.SeriesFrame:
"""Generate the symbol using `generate_gbm_paths`.
Args:
symbol (str): Symbol.
index (pd.Index): Pandas index.
S0 (float): Value at time 0.
Does not appear as the first value in the output data.
mu (float): Drift, or mean of the percentage change.
sigma (float): Standard deviation of the percentage change.
T (int): Number of time steps.
Defaults to the length of `index`.
I (int): Number of generated paths (columns in our case).
seed (int): Set seed to make the results deterministic.
"""
if T is None:
T = len(index)
out = generate_gbm_paths(S0, mu, sigma, T, len(index), I, seed=seed)[1:]
if out.shape[1] == 1:
return pd.Series(out[:, 0], index=index)
columns = pd.RangeIndex(stop=out.shape[1], name='path')
return pd.DataFrame(out, index=index, columns=columns)
def update_symbol(self, symbol: tp.Label, **kwargs) -> tp.SeriesFrame:
"""Update the symbol.
`**kwargs` will override keyword arguments passed to `GBMData.download_symbol`."""
download_kwargs = self.select_symbol_kwargs(symbol, self.download_kwargs)
download_kwargs['start'] = self.data[symbol].index[-1]
_ = download_kwargs.pop('S0', None)
S0 = self.data[symbol].iloc[-2]
_ = download_kwargs.pop('T', None)
download_kwargs['seed'] = None
kwargs = merge_dicts(download_kwargs, kwargs)
return self.download_symbol(symbol, S0=S0, **kwargs)
class YFData(Data):
"""`Data` for data coming from `yfinance`.
Stocks are usually in the timezone "+0500" and cryptocurrencies in UTC.
!!! warning
Data coming from Yahoo is not the most stable data out there. Yahoo may manipulate data
how they want, add noise, return missing data points (see volume in the example below), etc.
It's only used in vectorbt for demonstration purposes.
Usage:
* Fetch the business day except the last 5 minutes of trading data, and then update with the missing 5 minutes:
```pycon
>>> import vectorbt as vbt
>>> yf_data = vbt.YFData.download(
... "TSLA",
... start='2021-04-12 09:30:00 -0400',
... end='2021-04-12 09:35:00 -0400',
... interval='1m'
... )
>>> yf_data.get())
Open High Low Close \\
Datetime
2021-04-12 13:30:00+00:00 685.080017 685.679993 684.765015 685.679993
2021-04-12 13:31:00+00:00 684.625000 686.500000 684.010010 685.500000
2021-04-12 13:32:00+00:00 685.646790 686.820007 683.190002 686.455017
2021-04-12 13:33:00+00:00 686.455017 687.000000 685.000000 685.565002
2021-04-12 13:34:00+00:00 685.690002 686.400024 683.200012 683.715027
Volume Dividends Stock Splits
Datetime
2021-04-12 13:30:00+00:00 0 0 0
2021-04-12 13:31:00+00:00 152276 0 0
2021-04-12 13:32:00+00:00 168363 0 0
2021-04-12 13:33:00+00:00 129607 0 0
2021-04-12 13:34:00+00:00 134620 0 0
>>> yf_data = yf_data.update(end='2021-04-12 09:40:00 -0400')
>>> yf_data.get()
Open High Low Close \\
Datetime
2021-04-12 13:30:00+00:00 685.080017 685.679993 684.765015 685.679993
2021-04-12 13:31:00+00:00 684.625000 686.500000 684.010010 685.500000
2021-04-12 13:32:00+00:00 685.646790 686.820007 683.190002 686.455017
2021-04-12 13:33:00+00:00 686.455017 687.000000 685.000000 685.565002
2021-04-12 13:34:00+00:00 685.690002 686.400024 683.200012 683.715027
2021-04-12 13:35:00+00:00 683.604980 684.340027 682.760071 684.135010
2021-04-12 13:36:00+00:00 684.130005 686.640015 683.333984 686.563904
2021-04-12 13:37:00+00:00 686.530029 688.549988 686.000000 686.635010
2021-04-12 13:38:00+00:00 686.593201 689.500000 686.409973 688.179993
2021-04-12 13:39:00+00:00 688.500000 689.347595 687.710022 688.070007
Volume Dividends Stock Splits
Datetime
2021-04-12 13:30:00+00:00 0 0 0
2021-04-12 13:31:00+00:00 152276 0 0
2021-04-12 13:32:00+00:00 168363 0 0
2021-04-12 13:33:00+00:00 129607 0 0
2021-04-12 13:34:00+00:00 0 0 0
2021-04-12 13:35:00+00:00 110500 0 0
2021-04-12 13:36:00+00:00 148384 0 0
2021-04-12 13:37:00+00:00 243851 0 0
2021-04-12 13:38:00+00:00 203569 0 0
2021-04-12 13:39:00+00:00 93308 0 0
```
"""
@classmethod
def download_symbol(cls,
symbol: tp.Label,
period: str = 'max',
start: tp.Optional[tp.DatetimeLike] = None,
end: tp.Optional[tp.DatetimeLike] = None,
**kwargs) -> tp.Frame:
"""Download the symbol.
Args:
symbol (str): Symbol.
period (str): Period.
start (any): Start datetime.
See `vectorbt.utils.datetime_.to_tzaware_datetime`.
end (any): End datetime.
See `vectorbt.utils.datetime_.to_tzaware_datetime`.
**kwargs: Keyword arguments passed to `yfinance.base.TickerBase.history`.
"""
import yfinance as yf
# yfinance still uses mktime, which assumes that the passed date is in local time
if start is not None:
start = to_tzaware_datetime(start, tz=get_local_tz())
if end is not None:
end = to_tzaware_datetime(end, tz=get_local_tz())
return yf.Ticker(symbol).history(period=period, start=start, end=end, **kwargs)
def update_symbol(self, symbol: tp.Label, **kwargs) -> tp.Frame:
"""Update the symbol.
`**kwargs` will override keyword arguments passed to `YFData.download_symbol`."""
download_kwargs = self.select_symbol_kwargs(symbol, self.download_kwargs)
download_kwargs['start'] = self.data[symbol].index[-1]
kwargs = merge_dicts(download_kwargs, kwargs)
return self.download_symbol(symbol, **kwargs)
BinanceDataT = tp.TypeVar("BinanceDataT", bound="BinanceData")
class BinanceData(Data):
"""`Data` for data coming from `python-binance`.
Usage:
* Fetch the 1-minute data of the last 2 hours, wait 1 minute, and update:
```pycon
>>> import vectorbt as vbt
>>> binance_data = vbt.BinanceData.download(
... "BTCUSDT",
... start='2 hours ago UTC',
... end='now UTC',
... interval='1m'
... )
>>> binance_data.get()
2021-05-02 14:47:20.478000+00:00 - 2021-05-02 16:47:00+00:00: : 1it [00:00, 3.42it/s]
Open High Low Close Volume \\
Open time
2021-05-02 14:48:00+00:00 56867.44 56913.57 56857.40 56913.56 28.709976
2021-05-02 14:49:00+00:00 56913.56 56913.57 56845.94 56888.00 19.734841
2021-05-02 14:50:00+00:00 56888.00 56947.32 56879.78 56934.71 23.150163
... ... ... ... ... ...
2021-05-02 16:45:00+00:00 56664.13 56666.77 56641.11 56644.03 40.852719
2021-05-02 16:46:00+00:00 56644.02 56663.43 56605.17 56605.18 27.573654
2021-05-02 16:47:00+00:00 56605.18 56657.55 56605.17 56627.12 7.719933
Close time Quote volume \\
Open time
2021-05-02 14:48:00+00:00 2021-05-02 14:48:59.999000+00:00 1.633534e+06
2021-05-02 14:49:00+00:00 2021-05-02 14:49:59.999000+00:00 1.122519e+06
2021-05-02 14:50:00+00:00 2021-05-02 14:50:59.999000+00:00 1.317969e+06
... ... ...
2021-05-02 16:45:00+00:00 2021-05-02 16:45:59.999000+00:00 2.314579e+06
2021-05-02 16:46:00+00:00 2021-05-02 16:46:59.999000+00:00 1.561548e+06
2021-05-02 16:47:00+00:00 2021-05-02 16:47:59.999000+00:00 4.371848e+05
Number of trades Taker base volume \\
Open time
2021-05-02 14:48:00+00:00 991 13.771152
2021-05-02 14:49:00+00:00 816 5.981942
2021-05-02 14:50:00+00:00 1086 10.813757
... ... ...
2021-05-02 16:45:00+00:00 1006 18.106933
2021-05-02 16:46:00+00:00 916 14.869411
2021-05-02 16:47:00+00:00 353 3.903321
Taker quote volume
Open time
2021-05-02 14:48:00+00:00 7.835391e+05
2021-05-02 14:49:00+00:00 3.402170e+05
2021-05-02 14:50:00+00:00 6.156418e+05
... ...
2021-05-02 16:45:00+00:00 1.025892e+06
2021-05-02 16:46:00+00:00 8.421173e+05
2021-05-02 16:47:00+00:00 2.210323e+05
[120 rows x 10 columns]
>>> import time
>>> time.sleep(60)
>>> binance_data = binance_data.update()
>>> binance_data.get()
Open High Low Close Volume \\
Open time
2021-05-02 14:48:00+00:00 56867.44 56913.57 56857.40 56913.56 28.709976
2021-05-02 14:49:00+00:00 56913.56 56913.57 56845.94 56888.00 19.734841
2021-05-02 14:50:00+00:00 56888.00 56947.32 56879.78 56934.71 23.150163
... ... ... ... ... ...
2021-05-02 16:46:00+00:00 56644.02 56663.43 56605.17 56605.18 27.573654
2021-05-02 16:47:00+00:00 56605.18 56657.55 56605.17 56625.76 14.615437
2021-05-02 16:48:00+00:00 56625.75 56643.60 56614.32 56623.01 5.895843
Close time Quote volume \\
Open time
2021-05-02 14:48:00+00:00 2021-05-02 14:48:59.999000+00:00 1.633534e+06
2021-05-02 14:49:00+00:00 2021-05-02 14:49:59.999000+00:00 1.122519e+06
2021-05-02 14:50:00+00:00 2021-05-02 14:50:59.999000+00:00 1.317969e+06
... ... ...
2021-05-02 16:46:00+00:00 2021-05-02 16:46:59.999000+00:00 1.561548e+06
2021-05-02 16:47:00+00:00 2021-05-02 16:47:59.999000+00:00 8.276017e+05
2021-05-02 16:48:00+00:00 2021-05-02 16:48:59.999000+00:00 3.338702e+05
Number of trades Taker base volume \\
Open time
2021-05-02 14:48:00+00:00 991 13.771152
2021-05-02 14:49:00+00:00 816 5.981942
2021-05-02 14:50:00+00:00 1086 10.813757
... ... ...
2021-05-02 16:46:00+00:00 916 14.869411
2021-05-02 16:47:00+00:00 912 7.778489
2021-05-02 16:48:00+00:00 308 2.358130
Taker quote volume
Open time
2021-05-02 14:48:00+00:00 7.835391e+05
2021-05-02 14:49:00+00:00 3.402170e+05
2021-05-02 14:50:00+00:00 6.156418e+05
... ...
2021-05-02 16:46:00+00:00 8.421173e+05
2021-05-02 16:47:00+00:00 4.404362e+05
2021-05-02 16:48:00+00:00 1.335474e+05
[121 rows x 10 columns]
```
"""
@classmethod
def download(cls: tp.Type[BinanceDataT],
symbols: tp.Labels,
client: tp.Optional["ClientT"] = None,
**kwargs) -> BinanceDataT:
"""Override `vectorbt.data.base.Data.download` to instantiate a Binance client."""
from binance.client import Client
from vectorbt._settings import settings
binance_cfg = settings['data']['binance']
client_kwargs = dict()
for k in get_func_kwargs(Client):
if k in kwargs:
client_kwargs[k] = kwargs.pop(k)
client_kwargs = merge_dicts(binance_cfg, client_kwargs)
if client is None:
client = Client(**client_kwargs)
return super(BinanceData, cls).download(symbols, client=client, **kwargs)
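    # Note (added commentary): because any keyword matching
    # `binance.client.Client`'s signature is popped from `**kwargs` above and
    # merged with the `data.binance` settings, credentials can be passed
    # either globally via `vbt.settings` or directly in the call, e.g. with
    # placeholder keys:
    #
    # >>> vbt.BinanceData.download(
    # ...     "BTCUSDT", api_key="<KEY>", api_secret="<SECRET>",
    # ...     interval="1m", start="1 hour ago UTC")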
@classmethod
def download_symbol(cls,
symbol: str,
client: tp.Optional["ClientT"] = None,
interval: str = '1d',
start: tp.DatetimeLike = 0,
end: tp.DatetimeLike = 'now UTC',
delay: tp.Optional[float] = 500,
limit: int = 500,
show_progress: bool = True,
tqdm_kwargs: tp.KwargsLike = None) -> tp.Frame:
"""Download the symbol.
Args:
symbol (str): Symbol.
client (binance.client.Client): Binance client of type `binance.client.Client`.
interval (str): Kline interval.
See `binance.enums`.
start (any): Start datetime.
See `vectorbt.utils.datetime_.to_tzaware_datetime`.
end (any): End datetime.
See `vectorbt.utils.datetime_.to_tzaware_datetime`.
delay (float): Time to sleep after each request (in milliseconds).
limit (int): The maximum number of returned items.
show_progress (bool): Whether to show the progress bar.
tqdm_kwargs (dict): Keyword arguments passed to `tqdm`.
For defaults, see `data.binance` in `vectorbt._settings.settings`.
"""
if client is None:
raise ValueError("client must be provided")
if tqdm_kwargs is None:
tqdm_kwargs = {}
# Establish the timestamps
start_ts = datetime_to_ms(to_tzaware_datetime(start, tz=get_utc_tz()))
try:
first_data = client.get_klines(
symbol=symbol,
interval=interval,
limit=1,
startTime=0,
endTime=None
)
first_valid_ts = first_data[0][0]
next_start_ts = start_ts = max(start_ts, first_valid_ts)
        except Exception:
next_start_ts = start_ts
end_ts = datetime_to_ms(to_tzaware_datetime(end, tz=get_utc_tz()))
def _ts_to_str(ts: tp.DatetimeLike) -> str:
return str(pd.Timestamp(to_tzaware_datetime(ts, tz=get_utc_tz())))
# Iteratively collect the data
data: tp.List[list] = []
with tqdm(disable=not show_progress, **tqdm_kwargs) as pbar:
pbar.set_description(_ts_to_str(start_ts))
while True:
# Fetch the klines for the next interval
next_data = client.get_klines(
symbol=symbol,
interval=interval,
limit=limit,
startTime=next_start_ts,
endTime=end_ts
)
if len(data) > 0:
next_data = list(filter(lambda d: next_start_ts < d[0] < end_ts, next_data))
else:
next_data = list(filter(lambda d: d[0] < end_ts, next_data))
# Update the timestamps and the progress bar
if not len(next_data):
break
data += next_data
pbar.set_description("{} - {}".format(
_ts_to_str(start_ts),
_ts_to_str(next_data[-1][0])
))
pbar.update(1)
next_start_ts = next_data[-1][0]
if delay is not None:
time.sleep(delay / 1000) # be kind to api
# Convert data to a DataFrame
df = pd.DataFrame(data, columns=[
'Open time',
'Open',
'High',
'Low',
'Close',
'Volume',
'Close time',
'Quote volume',
'Number of trades',
'Taker base volume',
'Taker quote volume',
'Ignore'
])
df.index = pd.to_datetime(df['Open time'], unit='ms', utc=True)
del df['Open time']
df['Open'] = df['Open'].astype(float)
df['High'] = df['High'].astype(float)
df['Low'] = df['Low'].astype(float)
df['Close'] = df['Close'].astype(float)
df['Volume'] = df['Volume'].astype(float)
df['Close time'] = | pd.to_datetime(df['Close time'], unit='ms', utc=True) | pandas.to_datetime |
import logging
import unittest
import numpy as np
import pandas as pd
import scipy.stats as stats
from batchglm.api.models.tf1.glm_nb import Simulator
import diffxpy.api as de
class TestConstrained(unittest.TestCase):
def test_forfatal_from_string(self):
"""
Test if _from_string interface is working.
n_cells is constant as the design matrix and constraints depend on it.
"""
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
n_cells = 2000
n_genes = 2
sim = Simulator(num_observations=n_cells, num_features=n_genes)
sim.generate_sample_description(num_batches=0, num_conditions=0)
sim.generate()
# Build design matrix:
dmat = np.zeros([n_cells, 6])
dmat[:, 0] = 1
dmat[:500, 1] = 1 # bio rep 1
dmat[500:1000, 2] = 1 # bio rep 2
dmat[1000:1500, 3] = 1 # bio rep 3
dmat[1500:2000, 4] = 1 # bio rep 4
dmat[1000:2000, 5] = 1 # condition effect
coefficient_names = ['intercept', 'bio1', 'bio2', 'bio3', 'bio4', 'treatment1']
dmat_est = pd.DataFrame(data=dmat, columns=coefficient_names)
dmat_est_loc, _ = de.utils.design_matrix(dmat=dmat_est, return_type="dataframe")
dmat_est_scale, _ = de.utils.design_matrix(dmat=dmat_est, return_type="dataframe")
# Build constraints:
constraints_loc = de.utils.constraint_matrix_from_string(
dmat=dmat_est_loc.values,
coef_names=dmat_est_loc.columns,
constraints=["bio1+bio2=0", "bio3+bio4=0"]
)
constraints_scale = de.utils.constraint_matrix_from_string(
dmat=dmat_est_scale.values,
coef_names=dmat_est_scale.columns,
constraints=["bio1+bio2=0", "bio3+bio4=0"]
)
test = de.test.wald(
data=sim.input_data,
dmat_loc=dmat_est_loc,
dmat_scale=dmat_est_scale,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
coef_to_test=["treatment1"]
)
_ = test.summary()
def test_forfatal_from_dict(self):
"""
Test if dictionary-based constraint interface is working.
"""
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
n_cells = 2000
n_genes = 2
sim = Simulator(num_observations=n_cells, num_features=n_genes)
sim.generate_sample_description(num_batches=0, num_conditions=0)
sim.generate()
# Build design matrix:
sample_description = pd.DataFrame({
"cond": ["cond"+str(i // 1000) for i in range(n_cells)],
"batch": ["batch"+str(i // 500) for i in range(n_cells)]
})
test = de.test.wald(
data=sim.input_data,
sample_description=sample_description,
formula_loc="~1+cond+batch",
formula_scale="~1+cond+batch",
constraints_loc={"batch": "cond"},
constraints_scale={"batch": "cond"},
coef_to_test=["cond[T.cond1]"]
)
_ = test.summary()
def test_null_distribution_wald_constrained(self, n_genes: int = 100):
"""
Test if de.wald() with constraints generates a uniform p-value distribution
if it is given data simulated based on the null model. Returns the p-value
        of the two-sided Kolmogorov-Smirnov test for equality of the observed
p-value distribution and a uniform distribution.
n_cells is constant as the design matrix and constraints depend on it.
:param n_genes: Number of genes to simulate (number of tests).
"""
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
n_cells = 2000
sim = Simulator(num_observations=n_cells, num_features=n_genes)
sim.generate_sample_description(num_batches=0, num_conditions=0)
sim.generate()
# Build design matrix:
sample_description = pd.DataFrame({
"cond": ["cond" + str(i // 1000) for i in range(n_cells)],
"batch": ["batch" + str(i // 500) for i in range(n_cells)]
})
test = de.test.wald(
data=sim.input_data,
sample_description=sample_description,
formula_loc="~1+cond+batch",
formula_scale="~1+cond+batch",
constraints_loc={"batch": "cond"},
constraints_scale={"batch": "cond"},
coef_to_test=["cond[T.cond1]"]
)
_ = test.summary()
# Compare p-value distribution under null model against uniform distribution.
pval_h0 = stats.kstest(test.pval, 'uniform').pvalue
logging.getLogger("diffxpy").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)
assert pval_h0 > 0.05, "KS-Test failed: pval_h0 is <= 0.05!"
return True
def _test_null_distribution_wald_constrained_2layer(self, n_genes: int = 100):
"""
Test if de.wald() with constraints generates a uniform p-value distribution
if it is given data simulated based on the null model. Returns the p-value
        of the two-sided Kolmogorov-Smirnov test for equality of the observed
p-value distribution and a uniform distribution.
n_cells is constant as the design matrix and constraints depend on it.
:param n_genes: Number of genes to simulate (number of tests).
"""
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
n_cells = 12000
sim = Simulator(num_observations=n_cells, num_features=n_genes)
sim.generate_sample_description(num_batches=0, num_conditions=0)
sim.generate()
# Build design matrix:
dmat = np.zeros([n_cells, 14])
dmat[:, 0] = 1
dmat[6000:12000, 1] = 1 # condition effect
dmat[:1000, 2] = 1 # bio rep 1 - treated 1
dmat[1000:3000, 3] = 1 # bio rep 2 - treated 2
dmat[3000:5000, 4] = 1 # bio rep 3 - treated 3
dmat[5000:6000, 5] = 1 # bio rep 4 - treated 4
dmat[6000:7000, 6] = 1 # bio rep 5 - untreated 1
dmat[7000:9000, 7] = 1 # bio rep 6 - untreated 2
dmat[9000:11000, 8] = 1 # bio rep 7 - untreated 3
dmat[11000:12000, 9] = 1 # bio rep 8 - untreated 4
dmat[1000:2000, 10] = 1 # tech rep 1
dmat[7000:8000, 10] = 1 # tech rep 1
dmat[2000:3000, 11] = 1 # tech rep 2
dmat[8000:9000, 11] = 1 # tech rep 2
dmat[3000:4000, 12] = 1 # tech rep 3
dmat[9000:10000, 12] = 1 # tech rep 3
dmat[4000:5000, 13] = 1 # tech rep 4
dmat[10000:11000, 13] = 1 # tech rep 4
coefficient_names = ['intercept', 'treatment1',
'bio1', 'bio2', 'bio3', 'bio4', 'bio5', 'bio6', 'bio7', 'bio8',
'tech1', 'tech2', 'tech3', 'tech4']
dmat_est = | pd.DataFrame(data=dmat, columns=coefficient_names) | pandas.DataFrame |
"""
Once the CSV files of source_ids, ages, and references are assembled,
concatenate and merge them.
Date: May 2021.
Background: Created for v0.5 target catalog merge, to simplify life.
Contents:
    AGE_LOOKUP: manual lookup dictionary of common cluster ages.
get_target_catalog
assemble_initial_source_list
verify_target_catalog
"""
import numpy as np, pandas as pd
import os
from glob import glob
from cdips.utils.gaiaqueries import (
given_source_ids_get_gaia_data, given_votable_get_df
)
from cdips.paths import DATADIR, LOCALDIR
clusterdatadir = os.path.join(DATADIR, 'cluster_data')
localdir = LOCALDIR
agefmt = lambda x: np.round(np.log10(x),2)
AGE_LOOKUP = {
# Rizzuto+17 clusters. Hyades&Praesepe age from Brandt and Huang 2015,
# following Rebull+17's logic.
'Hyades': agefmt(8e8),
'Praesepe': agefmt(8e8),
'Pleiades': agefmt(1.25e8),
'PLE': agefmt(1.25e8),
'Upper Sco': agefmt(1.1e7),
'Upper Scorpius': agefmt(1.1e7),
'Upper Sco Lit.': agefmt(1.1e7),
# Furnkranz+19
'ComaBer': agefmt(4e8),
'ComaBerNeighborGroup': agefmt(7e8),
# EsplinLuhman, and other
'Taurus': agefmt(5e6),
'TAU': agefmt(5e6),
# assorted
'PscEri': agefmt(1.25e8),
'LCC': agefmt(1.1e7),
'ScoOB2': agefmt(1.1e7),
'ScoOB2_PMS': agefmt(1.1e7),
'ScoOB2_UMS': agefmt(1.1e7),
# Meingast 2021
'Blanco 1': agefmt(140e6),
'IC 2391': agefmt(36e6),
'IC 2602': agefmt(40e6),
'Melotte 20': agefmt(87e6),
'Melotte 22': agefmt(125e6),
'NGC 2451A': agefmt(44e6),
'NGC 2516': agefmt(170e6),
'NGC 2547': agefmt(30e6),
'NGC 7092': agefmt(310e6),
'Platais 9': agefmt(100e6),
# Gagne2018 moving groups
'118TAU': agefmt(10e6),
'ABDMG': agefmt(149e6),
'CAR': agefmt(45e6),
'CARN': agefmt(200e6),
'CBER': agefmt(400e6),
'COL': agefmt(42e6),
'EPSC': agefmt(4e6),
'ETAC': agefmt(8e6),
'HYA': agefmt(750e6),
'IC2391': agefmt(50e6),
'IC2602': agefmt(40e6),
'LCC': agefmt(15e6),
'OCT': agefmt(35e6),
'PL8': agefmt(60e6),
'ROPH': agefmt(2e6),
'THA': agefmt(45e6),
'THOR': agefmt(22e6),
'Tuc-Hor': agefmt(22e6),
'TWA': agefmt(10e6),
'UCL': agefmt(16e6),
'CRA': agefmt(10e6),
'UCRA': agefmt(10e6),
'UMA': agefmt(414e6),
'USCO': agefmt(10e6),
'XFOR': agefmt(500e6),
'{beta}PMG': agefmt(24e6),
'BPMG': agefmt(24e6),
# CantatGaudin2019 vela
'cg19velaOB2_pop1': agefmt(46e6),
'cg19velaOB2_pop2': agefmt(44e6),
'cg19velaOB2_pop3': agefmt(40e6),
'cg19velaOB2_pop4': agefmt(35e6),
'cg19velaOB2_pop5': agefmt(25e6),
'cg19velaOB2_pop6': agefmt(20e6),
'cg19velaOB2_pop7': agefmt(11e6),
}
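# The lookup stores log10(age in years) rounded to two decimals, e.g.
# agefmt(1.25e8) == round(np.log10(1.25e8), 2) == 8.1 for the Pleiades and
# agefmt(5e6) == 6.7 for Taurus.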
def assemble_initial_source_list(catalog_vnum):
"""
Given LIST_OF_LISTS_STARTER_v0.5.csv , exported from
/doc/list_of_cluster_member_lists.ods, clean and concatenate the cluster
members. Flatten the resulting list on source_ids, joining the cluster,
age, and bibcode columns into comma-separated strings.
"""
metadf = pd.read_csv(
os.path.join(clusterdatadir, 'LIST_OF_LISTS_STARTER_V0.6.csv')
)
metadf['bibcode'] = metadf.ads_link.str.extract("abs\/(.*)\/")
N_stars_in_lists = []
Nstars_with_age_in_lists = []
dfs = []
# for each table, concatenate into a dataframe of source_id, cluster,
# log10age ("age").
for ix, r in metadf.iterrows():
print(79*'-')
print(f'Beginning {r.reference_id}...')
csvpath = os.path.join(clusterdatadir, r.csv_path)
assert os.path.exists(csvpath)
df = pd.read_csv(csvpath)
df['reference_id'] = r.reference_id
df['reference_bibcode'] = r.bibcode
if 'HATSandHATNcandidates' in r.reference_id:
df['reference_bibcode'] = 'JoelHartmanPrivComm'
colnames = df.columns
#
# every CSV file needs a Gaia DR2 "source_id" column
#
if "source" in colnames:
df = df.rename(
columns={"source":"source_id"}
)
#
# every CSV file needs a "cluster name" name column
#
if "assoc" in colnames:
df = df.rename(
columns={"assoc":"cluster"} # moving groups
)
colnames = df.columns
if "cluster" not in colnames:
msg = (
f'WRN! for {r.reference_id} did not find "cluster" column. '+
f'Appending the reference_id ({r.reference_id}) as the cluster ID.'
)
print(msg)
df['cluster'] = r.reference_id
#
# every CSV file needs an "age" column, which can be null, but
# preferably is populated.
#
if "age" not in colnames:
if r.reference_id in [
'CantatGaudin2018a', 'CantatGaudin2020a', 'CastroGinard2020',
'GaiaCollaboration2018lt250', 'GaiaCollaboration2018gt250'
]:
# get clusters and ages from CG20b; use them as the reference
cg20bpath = os.path.join(
clusterdatadir,
"v05/CantatGaudin20b_cut_cluster_source_age.csv"
)
df_cg20b = pd.read_csv(cg20bpath)
cdf_cg20b = df_cg20b.drop_duplicates(subset=['cluster','age'])[
['cluster', 'age']
]
# cleaning steps
if r.reference_id == 'CastroGinard2020':
df['cluster'] = df.cluster.str.replace('UBC', 'UBC_')
elif r.reference_id in [
'GaiaCollaboration2018lt250',
'GaiaCollaboration2018gt250'
]:
df['cluster'] = df.cluster.str.replace('NGC0', 'NGC_')
df['cluster'] = df.cluster.str.replace('NGC', 'NGC_')
df['cluster'] = df.cluster.str.replace('IC', 'IC_')
df['cluster'] = df.cluster.str.replace('Stock', 'Stock_')
df['cluster'] = df.cluster.str.replace('Coll', 'Collinder_')
df['cluster'] = df.cluster.str.replace('Trump02', 'Trumpler_2')
df['cluster'] = df.cluster.str.replace('Trump', 'Trumpler_')
_df = df.merge(cdf_cg20b, how='left', on=['cluster'])
assert len(_df) == len(df)
df['age'] = _df['age']
print(
f'For {r.reference_id} got {len(df[~pd.isnull(df.age)])}/{len(df)} finite ages via CantatGaudin2020b crossmatch on cluster ID.'
)
del _df
elif (
('Zari2018' in r.reference_id)
or
('Oh2017' in r.reference_id)
or
('Ujjwal2020' in r.reference_id)
or
('CottenSong' in r.reference_id)
or
('HATSandHATNcandidates' in r.reference_id)
or
('SIMBAD' in r.reference_id)
or
('Gagne2018' in r.reference_id)
):
age = np.ones(len(df))*np.nan
df['age'] = age
else:
age_mapper = lambda k: AGE_LOOKUP[k]
age = df.cluster.apply(age_mapper)
df['age'] = age
N_stars_in_lists.append(len(df))
Nstars_with_age_in_lists.append(len(df[~pd.isnull(df.age)]))
dfs.append(df)
assert (
'source_id' in df.columns
and
'cluster' in df.columns
and
'age' in df.columns
)
metadf["Nstars"] = N_stars_in_lists
metadf["Nstars_with_age"] = Nstars_with_age_in_lists
# concatenation.
nomagcut_df = pd.concat(dfs)
assert np.sum(metadf.Nstars) == len(nomagcut_df)
# clean ages
sel = (nomagcut_df.age == -np.inf)
nomagcut_df.loc[sel,'age'] = np.nan
nomagcut_df['age'] = np.round(nomagcut_df.age,2)
#
# merge duplicates, and ','-join the cluster id strings, age values
#
scols = ['source_id', 'cluster', 'age', 'reference_id', 'reference_bibcode']
nomagcut_df = nomagcut_df[scols].sort_values(by='source_id')
for c in nomagcut_df.columns:
nomagcut_df[c] = nomagcut_df[c].astype(str)
print(79*'-')
print('Beginning aggregation (takes ~2-3 minutes for v0.5)...')
_ = nomagcut_df.groupby('source_id')
df_agg = _.agg({
"cluster": list,
"age": list,
"reference_id": list,
"reference_bibcode": list
})
u_sourceids = np.unique(nomagcut_df.source_id)
N_sourceids = len(u_sourceids)
assert len(df_agg) == N_sourceids
df_agg["source_id"] = df_agg.index
# turn the lists to comma separated strings.
outdf = pd.DataFrame({
"source_id": df_agg.source_id,
"cluster": [','.join(map(str, l)) for l in df_agg['cluster']],
"age": [','.join(map(str, l)) for l in df_agg['age']],
"mean_age": [np.round(np.nanmean(np.array(l).astype(float)),2) for l in df_agg['age']],
"reference_id": [','.join(map(str, l)) for l in df_agg['reference_id']],
"reference_bibcode": [','.join(map(str, l)) for l in df_agg['reference_bibcode']],
})
outpath = os.path.join(
clusterdatadir, f'list_of_lists_keys_paths_assembled_v{catalog_vnum}.csv'
)
metadf.to_csv(outpath, index=False)
print(f'Made {outpath}')
outpath = os.path.join(
clusterdatadir, f'cdips_targets_v{catalog_vnum}_nomagcut.csv'
)
outdf.to_csv(outpath, index=False)
print(f'Made {outpath}')
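# The flattening above boils down to a groupby + list aggregation followed by
# a comma join; a minimal self-contained sketch of that pattern (toy
# source_ids, not real Gaia IDs):
#
# >>> _toy = pd.DataFrame({'source_id': [1, 1, 2],
# ...                      'cluster': ['Pleiades', 'PscEri', 'Hyades']})
# >>> _agg = _toy.groupby('source_id').agg({'cluster': list})
# >>> [','.join(map(str, l)) for l in _agg['cluster']]
# ['Pleiades,PscEri', 'Hyades']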
def verify_target_catalog(df, metadf):
"""
Check that each entry in the (pre magnitude cut) target catalog has
a source_id that matches the original catalog. (i.e., ensure that no
int/int64/str lossy conversion bugs have happened).
"""
print(79*'-')
print('Beginning verification...')
print(79*'-')
for ix, r in metadf.sort_values('Nstars').iterrows():
print(f'{r.reference_id} (Nstars={r.Nstars})...')
sel = df.reference_id.str.contains(r.reference_id)
df_source_ids = np.array(df.loc[sel, 'source_id']).astype(np.int64)
csvpath = os.path.join(clusterdatadir, r.csv_path)
df_true = pd.read_csv(csvpath)
if 'source_id' not in df_true.columns:
df_true = df_true.rename(columns={"source":"source_id"})
true_source_ids = (
np.unique(np.array(df_true.source_id).astype(np.int64))
)
np.testing.assert_array_equal(
np.sort(df_source_ids), np.sort(true_source_ids)
)
print('Verified that the pre-mag cut target catalog has source_ids that '
'correctly match the original. ')
print(79*'-')
def verify_gaia_xmatch(df, gdf, metadf):
"""
Check that each entry in the target catalog has a Gaia xmatch source_id
that matches the original catalog. For any that do not, understand why not.
"""
print(79*'-')
print('Beginning Gaia xmatch verification...')
print(79*'-')
gdf_source_ids = np.unique(np.array(gdf.source_id).astype(np.int64))
for ix, r in metadf.sort_values('Nstars').iterrows():
print(f'{r.reference_id} (Nstars={r.Nstars})...')
sel = df.reference_id.str.contains(r.reference_id)
df_source_ids = np.array(df.loc[sel, 'source_id']).astype(np.int64)
int1d = np.intersect1d(df_source_ids, gdf_source_ids)
if not len(int1d) == len(df_source_ids):
msg = f'\tWRN! {r.reference_id} only got {len(int1d)} Gaia xmatches.'
print(msg)
if 'NASAExoArchive' in r.reference_id:
csvpath = os.path.join(clusterdatadir, r.csv_path)
df_true = pd.read_csv(csvpath)
missing = df_source_ids[
~np.in1d(df_source_ids, gdf_source_ids)
]
# NOTE: should not be raised.
print('Verified that the pre-mag cut target catalog has source_ids that '
'match the original (or close enough). ')
print(79*'-')
def get_target_catalog(catalog_vnum, VERIFY=1):
"""
1. Assemble the target catalog (down to arbitrary brightness; i.e, just
clean and concatenate).
2. Manually async query the Gaia database based on those source_ids.
3. Verify the result, and merge and write it.
"""
csvpath = os.path.join(
clusterdatadir, f'cdips_targets_v{catalog_vnum}_nomagcut.csv'
)
if not os.path.exists(csvpath):
assemble_initial_source_list(catalog_vnum)
df = pd.read_csv(csvpath)
# made by assemble_initial_source_list above.
metapath = os.path.join(
clusterdatadir, f'list_of_lists_keys_paths_assembled_v{catalog_vnum}.csv'
)
metadf = | pd.read_csv(metapath) | pandas.read_csv |
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from sys import argv
dates=("2020-04-01", "2020-04-08", "2020-04-15", "2020-04-22",
"2020-04-29" ,"2020-05-06", "2020-05-13","2020-05-20", "2020-05-27", "2020-06-03",
"2020-06-10", "2020-06-17", "2020-06-24", "2020-07-01", "2020-07-08",
"2020-07-15", "2020-07-22", "2020-07-29", "2020-08-05", "2020-08-12",
"2020-08-19", "2020-08-26", "2020-09-02", "2020-09-16", "2020-09-23",
"2020-09-30", "2020-10-07", "2020-10-14", "2020-10-21")
days_list=(
60, 67, 74, 81, 88, 95, 102, 109, 116, 123, 130,
137, 144, 151, 158, 165, 172,179,186,193,200,207,
214, #skip 221, data missing 2020-09-09
228,235, 242, 249,256,263)
df = pd.DataFrame()
for i,date in enumerate(dates):
states = ['NSW','QLD','SA','TAS','VIC','WA','ACT','NT']
n_sims = int(argv[1])
start_date = '2020-03-01'
days = days_list[i]
forecast_type = "R_L" #default None
forecast_date = date #format should be '%Y-%m-%d'
end_date = | pd.to_datetime(start_date,format='%Y-%m-%d') | pandas.to_datetime |
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
        # add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
        # iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '3D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(['now', pd.Timestamp.max])
dtimin = pd.to_datetime(['now', pd.Timestamp.min])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants:
with pytest.raises(OverflowError):
dtimax - variant
expected = pd.Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = pd.Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError):
dtimin - variant
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_add_offset_array(self, tz, box):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_index_equal(res2, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_sub_offset_array(self, tz, box):
# GH#18824
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti - other
expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_with_offset_series(self, tz, names):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
expected_add = Series([dti[n] + other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
tm.assert_series_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_series_equal(res2, expected_add)
expected_sub = Series([dti[n] - other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res3 = dti - other
tm.assert_series_equal(res3, expected_sub)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
[tm.assert_series_equal,
tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
result = s + pd.DateOffset(years=1)
result2 = pd.DateOffset(years=1) + s
exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
assert_func(result, exp)
assert_func(result2, exp)
result = s - pd.DateOffset(years=1)
exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
assert_func(result, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
Timestamp('2000-02-16', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([ | Timestamp('2000-01-15 00:15:00', tz='US/Central') | pandas.Timestamp |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: li
@file: factor_cash_flow.py
@time: 2019-05-30
"""
import gc, six
import json
import numpy as np
import pandas as pd
from utilities.calc_tools import CalcTools
from utilities.singleton import Singleton
# from basic_derivation import app
# from ultron.cluster.invoke.cache_data import cache_data
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
@six.add_metaclass(Singleton)
class FactorCashFlow(object):
"""
现金流量
"""
def __init__(self):
__str__ = 'factor_cash_flow'
self.name = '财务指标'
self.factor_type1 = '财务指标'
self.factor_type2 = '现金流量'
self.description = '财务指标的二级指标-现金流量'
@staticmethod
def CashOfSales(tp_cash_flow, factor_cash_flow, dependencies=['net_operate_cash_flow', 'operating_revenue']):
"""
        :name: Net cash flow from operating activities / operating revenue
        :desc: Net cash flow from operating activities / operating revenue (MRQ)
:unit:
:view_dimension: 0.01
"""
cash_flow = tp_cash_flow.loc[:, dependencies]
cash_flow['CashOfSales'] = np.where(CalcTools.is_zero(cash_flow.operating_revenue.values),
0,
cash_flow.net_operate_cash_flow.values / cash_flow.operating_revenue.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_cash_flow = pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code")
# factor_cash_flow['CashOfSales'] = cash_flow['CashOfSales']
return factor_cash_flow
@staticmethod
def NOCFToOpt(tp_cash_flow, factor_cash_flow, dependencies=['net_operate_cash_flow', 'total_operating_revenue', 'total_operating_cost']):
"""
        :name: Net cash flow from operating activities / (total operating revenue - total operating cost)
        :desc: Net cash flow from operating activities / (total operating revenue - total operating cost)
:unit:
:view_dimension: 0.01
"""
cash_flow = tp_cash_flow.loc[:, dependencies]
cash_flow['NOCFToOpt'] = np.where(
CalcTools.is_zero((cash_flow.total_operating_revenue.values - cash_flow.total_operating_cost.values)), 0,
cash_flow.net_operate_cash_flow.values / (
cash_flow.total_operating_revenue.values - cash_flow.total_operating_cost.values))
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_cash_flow = pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code")
# factor_cash_flow['NOCFToOpt'] = cash_flow['NOCFToOpt']
return factor_cash_flow
@staticmethod
def SalesServCashToOR(tp_cash_flow, factor_cash_flow, dependencies=['goods_sale_and_service_render_cash', 'operating_revenue']):
"""
        :name: Cash received from sales of goods and rendering of services / operating revenue
        :desc: Cash received from sales of goods and rendering of services / operating revenue
:unit:
:view_dimension: 0.01
"""
cash_flow = tp_cash_flow.loc[:, dependencies]
cash_flow['SalesServCashToOR'] = np.where(CalcTools.is_zero(cash_flow.operating_revenue.values),
0,
cash_flow.goods_sale_and_service_render_cash.values / cash_flow.operating_revenue.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_cash_flow = | pd.merge(factor_cash_flow, cash_flow, how='outer', on="security_code") | pandas.merge |
#!pip install fitbit
#!pip install -r requirements/base.txt
#!pip install -r requirements/dev.txt
#!pip install -r requirements/test.txt
from time import sleep
import fitbit
import cherrypy
import requests
import json
import datetime
import scipy.stats
import pandas as pd
import numpy as np
# plotting
import matplotlib
from matplotlib import pyplot as plt
import seaborn as sns
get_ipython().magic(u'matplotlib inline')
with open('heartrate/HR2017-12-23.json') as f:
hr_dat_sample = json.loads(f.read())
parsed_json_hr_samp = json.loads(hr_dat_sample)
list(parsed_json_hr_samp['activities-heart-intraday'].keys())
# ## Heart Rate
dates = | pd.date_range('2017-12-23', '2018-01-25') | pandas.date_range |
import pandas as pd
import sys
def main(argv):
    if len(argv) < 3:
print('Not enough arguments provided.')
return
in_dfs = []
    for input_file in argv[1:-1]:
in_dfs.append(pd.read_csv(input_file))
out_df = | pd.concat(in_dfs) | pandas.concat |
#!/usr/bin/env python
import numpy as np
import netCDF4 as nc
import pandas as pd
import multiprocessing
import textwrap
import matplotlib.pyplot as plt
import lhsmdu
import glob
import json
import os
import ast
import shutil
import subprocess
from contextlib import contextmanager
import param_util as pu
import output_utils as ou
@contextmanager
def log_wrapper(message,tag=''):
'''
Likely will abandon or repurpose this function.
Not super helpful as a log printer.'''
print('[SA:{}] {}'.format(tag, message))
try:
yield
finally:
print()
def generate_uniform(N, param_props):
'''
Generate sample matrix using uniform method.
Sample matrix will have one row for each "sample" of the
parameters. There will be one column for each parameter in
the `param_props` list.
Parameters
----------
N : int
number of samples (rows) to create
param_props : list of dicts
Each item in `param_props` list will be a dictionary
with at least the following:
>>> param_props = {
... 'name': 'rhq10', # name in dvmdostem parameter file (cmt_*.txt)
... 'bounds': [5.2, 6.4], # the min and max values the parameter can have
... }
Returns
-------
df : pandas.DataFrame, shape (N, len(param_props))
There will be one column for each parameter in the
`param_props` list and N rows (samples).
'''
print(param_props)
l = np.random.uniform(size=(N, len(param_props)))
# Generate bounds, based on specification in params list
lows = np.array([p['bounds'][0] for p in param_props])
highs = np.array([p['bounds'][1] for p in param_props])
# Figure out the spread, or difference between bounds
spreads = highs - lows
sm = l * spreads + lows
return pd.DataFrame(sm, columns=[p['name'] for p in param_props])
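# The scaling above maps each raw U(0, 1) draw onto its parameter's bounds;
# e.g. for bounds [5.2, 6.4] a draw of 0.5 becomes 0.5 * (6.4 - 5.2) + 5.2 = 5.8.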
def generate_lhc(N, param_props):
'''
Generate sample matrix using Latin Hyper Cube method.
Sample matrix will have one row for each "sample" of the
parameters. There will be one column for each parameter in
the `param_props` list.
Parameters
----------
N : int
number of samples (rows) to create
param_props : list of dicts
Each item in `param_props` list will be a dictionary
with at least the following:
>>> param_props = {
... 'name': 'cmax', # name in dvmdostem parameter file (cmt_*.txt)
... 'bounds': [100.1, 105.1], # the min and max values the parameter can have
... }
Returns
-------
df : pandas.DataFrame, shape (N, len(param_props))
There will be one column for each parameter in the
`param_props` list and N rows (samples).
'''
# Generate bounds, based on specification in params list
lo_bounds = np.array([p['bounds'][0] for p in param_props])
hi_bounds = np.array([p['bounds'][1] for p in param_props])
# Figure out the spread, or difference between bounds
spreads = hi_bounds - lo_bounds
  # Draw a Latin Hypercube sample on the unit hypercube: shape (n_params, N)
  l = lhsmdu.sample(len(param_props), N)
  # Re-draw within the same strata and transpose to shape (N, n_params)
  l = lhsmdu.resample().T
  # Diagonal matrix holding each parameter's range (high - low)
  mat_diff = np.diag(spreads)
  # Scale the unit sample into each parameter's bounds: l * range + low
  sample_matrix = l * mat_diff + lo_bounds
return pd.DataFrame(sample_matrix, columns=[p['name'] for p in param_props])
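# Minimal sketch of calling the sampler directly (values are illustrative and
# lhsmdu must be importable for this to run):
#
# >>> props = [{'name': 'cmax', 'bounds': [100.1, 105.1]},
# ...          {'name': 'rhq10', 'bounds': [5.2, 6.4]}]
# >>> sm = generate_lhc(10, props)
# >>> sm.shape
# (10, 2)
# >>> bool(((sm['cmax'] >= 100.1) & (sm['cmax'] <= 105.1)).all())
# True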
class SensitivityDriver(object):
'''
Sensitivity Analysis Driver class.
Driver class for conducting dvmdostem SensitivityAnalysis.
Methods for cleaning, setup, running model, collecting outputs.
Basic overview of use is like this:
1. Instantiate driver object.
2. Setup/design the experiment (parameters, to use,
number of samples, etc)
3. Use driver object to setup the run folders.
4. Use driver object to carry out model runs.
5. Use driver object to summarize/collect outputs.
6. Use driver object to make plots, do analysis.
Parameters
----------
See Also
--------
Examples
--------
Instantiate object, sets pixel, outputs, working directory,
site selection (input data path)
>>> driver = SensitivityDriver()
Show info about the driver object:
>>> driver.design_experiment(5, 4, params=['cmax','rhq10','nfall(1)'], pftnums=[2,None,2])
>>> driver.sample_matrix
cmax rhq10 nfall(1)
0 63.536594 1.919504 0.000162
1 62.528847 2.161819 0.000159
2 67.606747 1.834203 0.000145
3 59.671967 2.042034 0.000171
4 57.711999 1.968631 0.000155
'''
def __init__(self, clean=False):
'''
Constructor
Hard code a bunch of stuff for now...
'''
# Made this one private because I don't want it to get confused with
# the later params directories that will be created in each run folder.
self.__initial_params = '/work/parameters'
self.work_dir = '/data/workflows/sensitivity_analysis'
self.site = '/data/input-catalog/cru-ts40_ar5_rcp85_ncar-ccsm4_CALM_Toolik_LTER_10x10/'
self.PXx = 0
self.PXy = 0
self.outputs = [
{ 'name': 'GPP', 'type': 'flux',},
{ 'name': 'VEGC','type': 'pool',},
]
if not os.path.isdir(self.work_dir):
os.mkdir(self.work_dir)
if clean:
self.clean()
def get_initial_params_dir(self):
'''Read only accessor to private member variable.'''
return self.__initial_params
def design_experiment(self, Nsamples, cmtnum, params, pftnums,
percent_diffs=None, sampling_method='lhc'):
'''
Builds bounds based on initial values found in dvmdostem parameter
files (cmt_*.txt files) and the `percent_diffs` array.
The `percent_diffs` array gets used to figure out how far
the bounds should be from the initial value. Defaults to initial
value +/-10%.
Sets instance values for `self.params` and `self.sample_matrix`.
Parameters
----------
Nsamples : int
How many samples to draw. One sample equates to one run to be done with
the parameter values in the sample.
cmtnum : int
Which community type number to use for initial parameter values, for
doing runs and analyzing outputs.
params : list of strings
List of parameter names to use in the experiment. Each name must be
in one of the dvmdostem parameter files (cmt_*.txt).
pftnums : list of ints
List of PFT numbers, one number for each parameter in `params`. Use
`None` in the list for any non-pft parameter (i.e. a soil parameter).
percent_diffs: list of floats
List values, one for each parameter in `params`. The value is used to
the bounds with respect to the intial parameter value. I.e. passing
a value in the percent_diff array of .3 would mean that bounds should
be +/-30% of the initial value of the parameter.
Returns
-------
None
'''
if not percent_diffs:
percent_diffs = np.ones(len(params)) * 0.1 # use 10% for default perturbation
assert len(params) == len(pftnums), "params list and pftnums list must be same length!"
assert len(params) == len(percent_diffs), "params list and percent_diffs list must be same length"
self.params = []
plu = pu.build_param_lookup(self.__initial_params)
for pname, pftnum, perturbation in zip(params, pftnums, percent_diffs):
original_pdata_file = pu.which_file(self.__initial_params, pname, lookup_struct=plu)
p_db = pu.get_CMT_datablock(original_pdata_file, cmtnum)
p_dd = pu.cmtdatablock2dict(p_db)
if pname in p_dd.keys():
p_initial = p_dd[pname]
else:
p_initial = p_dd['pft{}'.format(pftnum)][pname]
p_bounds = [p_initial - (p_initial*perturbation), p_initial + (p_initial*perturbation)]
self.params.append(dict(name=pname, bounds=p_bounds, initial=p_initial, cmtnum=cmtnum, pftnum=pftnum))
if sampling_method == 'lhc':
self.sample_matrix = generate_lhc(Nsamples, self.params)
elif sampling_method == 'uniform':
      self.sample_matrix = generate_uniform(Nsamples, self.params)
def save_experiment(self, name=''):
'''Write the parameter properties and sensitivity matrix to files.'''
if name == '':
sm_fname = os.path.join(self.work_dir, 'sample_matrix.csv')
pp_fname = os.path.join(self.work_dir, 'param_props.csv')
else:
sm_fname = "{}_sample_matrix.csv".format(name)
pp_fname = '{}_param_props.csv'.format(name)
self.sample_matrix.to_csv(sm_fname, index=False)
pd.DataFrame(self.params).to_csv(pp_fname, index=False)
def load_experiment(self, param_props_path, sample_matrix_path):
'''Load parameter properties and sample matrix from files.'''
self.sample_matrix = | pd.read_csv(sample_matrix_path) | pandas.read_csv |
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import random
from math import sqrt
from datetime import datetime,timedelta
from pytz import timezone
from time import time
from collections import deque
from IPython.display import clear_output
from statsmodels.tools.eval_measures import mse
from statsmodels.tools.eval_measures import meanabs as mae
def test_index_gen(time_stamp_threshhold = '2008-01-01 00:00:00-08:00',test_time_num = 1800, test_airport_num = 60):
idx2airport=pd.read_csv("idx2airport.csv",index_col=0)['0'].to_dict()
idx2time_stamp=pd.read_csv("idx2time_stamp.csv",index_col=0)['0'].to_dict()
time_stamp2idx=pd.read_csv("time_stamp2idx.csv",index_col=0)['0'].to_dict()
random.seed(4)
test_airport_index = random.sample(idx2airport.keys(), k=test_airport_num)
test_date_index = random.sample(list(idx2time_stamp.keys())[time_stamp2idx[time_stamp_threshhold]:], k=test_time_num)
return test_date_index,test_airport_index
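# rwse_eval below implements a weighted RMSE over the sampled (time, airport)
# grid: the error is the observed minus predicted delay ratio and each term is
# weighted by the scheduled flight count, i.e.
# RWSE = sqrt(sum(w_ij**2 * e_ij**2) / sum(w_ij**2)).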
def rwse_eval(pred_data, test_date_index, test_airport_index):
arr_sche = pd.read_csv("ArrTotalFlights.csv",index_col=0)
dep_sche = pd.read_csv("DepTotalFlights.csv",index_col=0)
DelayRatio = pd.read_csv("DelayRatio.csv",index_col=0)
p = DelayRatio.fillna(0).iloc[test_date_index, test_airport_index]
for i in test_airport_index:
p[str(i)] = p[str(i)].values - pred_data[str(i)].values
numerator = 0
denominator = 0
for i in test_airport_index:
for j in test_date_index:
            weight_wae = np.abs(arr_sche[str(i)].values[j]) + np.abs(dep_sche[str(i)].values[j])
numerator += (np.abs(p[str(i)].loc[j])**2) * (weight_wae**2)
for i in test_airport_index:
for j in test_date_index:
            denominator += ((np.abs(arr_sche[str(i)].values[j]) + np.abs(dep_sche[str(i)].values[j])))**2
rwse = float(sqrt(numerator/denominator))
return rwse
def wae_eval(pred_data,test_date_index,test_airport_index):
arr_sche = pd.read_csv("ArrTotalFlights.csv",index_col=0)
dep_sche = | pd.read_csv("DepTotalFlights.csv",index_col=0) | pandas.read_csv |
from mock import patch
from ebmdatalab import bq
from pandas import DataFrame
import tempfile
import pytest
import os
def test_fingerprint_sql():
input_sql = 'select *, "Frob" from x -- comment\n' "where (a >= 4);"
same_sql_different_caps = 'SELECT *, "Frob" from x -- comment\n' "where (a >= 4);"
same_sql_different_quoted_caps = (
'SELECT *, "frob" from x -- comment\n' "where (a >= 4);"
)
same_sql_different_comment = (
'select *, "Frob" from x -- comment 2\n' "where (a >= 4);"
)
same_sql_different_whitespace = (
'select *, "Frob" from x -- comment 2\n' "\n" " where (a >= 4);"
)
fingerprint = bq.fingerprint_sql(input_sql)
assert fingerprint == bq.fingerprint_sql(same_sql_different_caps)
assert fingerprint != bq.fingerprint_sql(same_sql_different_quoted_caps)
assert fingerprint == bq.fingerprint_sql(same_sql_different_comment)
assert fingerprint == bq.fingerprint_sql(same_sql_different_whitespace)
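# In other words, the fingerprint is expected to normalise keyword casing,
# whitespace and comments, while still distinguishing quoted string literals
# such as "Frob" vs "frob".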
@patch("ebmdatalab.bq.pd.read_gbq")
def test_cached_read(mock_read_gbq):
mock_read_gbq.return_value = DataFrame([{"a": 3}])
sql = "select * from foobar"
# Test identical SQL bypasses reading BQ
with tempfile.NamedTemporaryFile() as csv_file:
for _ in range(0, 2):
df = bq.cached_read(sql, csv_path=csv_file.name)
assert df.loc[0]["a"] == 3
assert mock_read_gbq.call_count == 1
# and now with `use_cache` param
df = bq.cached_read(sql, csv_path=csv_file.name, use_cache=False)
assert mock_read_gbq.call_count == 2
@patch("ebmdatalab.bq.pd.read_gbq")
def test_cached_read_no_csv_path(mock_read_gbq):
mock_read_gbq.return_value = DataFrame([{"a": 3}])
sql = "select * from foobar"
# Test no csv path raises error
with tempfile.NamedTemporaryFile() as csv_file:
with pytest.raises(AssertionError) as exc_info:
df = bq.cached_read(sql, csv_path="")
assert "You must supply csv_path" in str(exc_info.value)
@patch("ebmdatalab.bq.pd.read_gbq")
def test_cached_read_non_existing_csv_dir_made(mock_read_gbq):
mock_read_gbq.return_value = DataFrame([{"a": 3}])
sql = "select * from foobar"
# Make temporary folder to save temporary files in
folder = tempfile.TemporaryDirectory()
with tempfile.NamedTemporaryFile(dir=folder.name) as csv_file:
# Test csv_dir exists
df = bq.cached_read(sql, csv_path=csv_file.name)
assert os.path.exists(folder.name)
# Delete contents of temporary folder
for file in os.listdir(folder.name):
os.remove(f"{folder.name}/{file}")
# Delete temporary folder
os.rmdir(folder.name)
assert os.path.exists(folder.name) is False
# Test temporary folder is remade
df = bq.cached_read(sql, csv_path=csv_file.name)
assert os.path.exists(folder.name)
def _check_cached_read(csv_file, mock_read, sql, expected):
mock_read.return_value = expected
df = bq.cached_read(sql, csv_path=csv_file.name)
assert str(df) == str(expected)
@patch("ebmdatalab.bq.pd.read_gbq")
def test_old_cache_markers_removed(mock_read_gbq):
with tempfile.NamedTemporaryFile() as csv_file:
# First, cause some sql to be cached
inputs_and_outputs = [
("select * from foobar", DataFrame([{"a": 1}])),
("select * from foobar order by id", | DataFrame([{"a": 2}]) | pandas.DataFrame |
import pandas as pd
import bitfinex
from bitfinex.backtest import data
# old data...up to 2016 or so
btc_charts_url = 'http://api.bitcoincharts.com/v1/csv/bitfinexUSD.csv.gz'
df = pd.read_csv(btc_charts_url, names=['time', 'price', 'volume'])
df['time'] = | pd.to_datetime(df['time'], unit='s') | pandas.to_datetime |
"""Run the model calibration"""
# Spyder cannot run parallels, so always set -option_parallels=0 when testing in Spyder.
# Built-in libraries
import os
import argparse
import multiprocessing
import resource
import time
import inspect
# External libraries
from datetime import datetime
import pandas as pd
import numpy as np
import xarray as xr
import pymc
from pymc import deterministic
from scipy.optimize import minimize
import pickle
from scipy import stats
# Local libraries
import pygem.pygem_input as pygem_prms
import pygemfxns_modelsetup as modelsetup
import pygemfxns_massbalance as massbalance
import class_climate
import class_mbdata
#from memory_profiler import profile
#%% FUNCTIONS
def getparser():
"""
Use argparse to add arguments from the command line
Parameters
----------
ref_gcm_name (optional) : str
reference gcm name
num_simultaneous_processes (optional) : int
number of cores to use in parallels
option_parallels (optional) : int
switch to use parallels or not
rgi_glac_number_fn : str
filename of .pkl file containing a list of glacier numbers which is used to run batches on the supercomputer
rgi_glac_number : str
rgi glacier number to run for supercomputer
progress_bar : int
Switch for turning the progress bar on or off (default = 0 (off))
debug : int
Switch for turning debug printing on or off (default = 0 (off))
Returns
-------
Object containing arguments and their respective values.
"""
parser = argparse.ArgumentParser(description="run calibration in parallel")
# add arguments
parser.add_argument('-ref_gcm_name', action='store', type=str, default=pygem_prms.ref_gcm_name,
help='reference gcm name')
parser.add_argument('-num_simultaneous_processes', action='store', type=int, default=4,
help='number of simultaneous processes (cores) to use')
parser.add_argument('-option_parallels', action='store', type=int, default=1,
help='Switch to use or not use parallels (1 - use parallels, 0 - do not)')
parser.add_argument('-rgi_glac_number_fn', action='store', type=str, default=None,
help='Filename containing list of rgi_glac_number, helpful for running batches on spc')
parser.add_argument('-progress_bar', action='store', type=int, default=0,
help='Boolean for the progress bar to turn it on or off (default 0 is off)')
parser.add_argument('-debug', action='store', type=int, default=0,
help='Boolean for debugging to turn it on or off (default 0 is off)')
parser.add_argument('-rgi_glac_number', action='store', type=str, default=None,
help='rgi glacier number for supercomputer')
return parser
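
# Example invocation (script name assumed; only the arguments defined above are used):
#   python run_calibration.py -ref_gcm_name ERA-Interim -num_simultaneous_processes 4 \
#       -option_parallels 1 -progress_bar 1 -debug 1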
def mb_mwea_calc(modelparameters, glacier_rgi_table, glacier_area_initial, icethickness_initial, width_initial,
elev_bins, glacier_gcm_temp, glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev,
glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table, t1_idx, t2_idx, t1, t2,
option_areaconstant=1, return_tc_mustmelt=0, return_volremaining=0):
"""
Run the mass balance and calculate the mass balance [mwea]
Parameters
----------
option_areaconstant : int
Switch to keep area constant (1) or not (0)
Returns
-------
mb_mwea : float
mass balance [m w.e. a-1]
"""
# Number of constant years
startyear_doy = (pd.to_datetime(pd.DataFrame({'year':[dates_table.loc[0,'date'].year],
'month':[dates_table.loc[0,'date'].month],
'day':[dates_table.loc[0,'date'].day]}))
.dt.strftime("%j").astype(float).values[0])
startyear_daysinyear = (
(pd.to_datetime(pd.DataFrame({'year':[dates_table.loc[0,'date'].year], 'month':[12], 'day':[31]})) -
pd.to_datetime(pd.DataFrame({'year':[dates_table.loc[0,'date'].year], 'month':[1], 'day':[1]})))
.dt.days + 1).values[0]
startyear_decimal = dates_table.loc[0,'date'].year + startyear_doy / startyear_daysinyear
constantarea_years = int(t1 - startyear_decimal)
# Mass balance calculations
(glac_bin_temp, glac_bin_prec, glac_bin_acc, glac_bin_refreeze, glac_bin_snowpack, glac_bin_melt,
glac_bin_frontalablation, glac_bin_massbalclim, glac_bin_massbalclim_annual, glac_bin_area_annual,
glac_bin_icethickness_annual, glac_bin_width_annual, glac_bin_surfacetype_annual,
glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline, glac_wide_snowpack,
glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual, offglac_wide_prec,
offglac_wide_refreeze, offglac_wide_melt, offglac_wide_snowpack, offglac_wide_runoff) = (
massbalance.runmassbalance(modelparameters, glacier_rgi_table, glacier_area_initial, icethickness_initial,
width_initial, elev_bins, glacier_gcm_temp, glacier_gcm_tempstd, glacier_gcm_prec,
glacier_gcm_elev, glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table,
option_areaconstant=option_areaconstant, constantarea_years=constantarea_years,
debug=False))
# Option to return must melt condition
if return_tc_mustmelt == 1:
# Climatic mass balance of lowermost bin must be negative at some point
glac_bin_area_annual_mask = glac_bin_area_annual.copy()
glac_bin_area_annual_mask[glac_bin_area_annual_mask>0] = 1
lowestbin_idx = np.argmax(glac_bin_area_annual_mask > 0, axis=0)
lowestbin_mbclim_annual = (
glac_bin_massbalclim_annual[list(lowestbin_idx)[:-1], np.arange(0,lowestbin_idx.shape[0]-1)])
nyears_negmbclim = np.sum([1 if x < 0 else 0 for x in lowestbin_mbclim_annual])
return nyears_negmbclim
elif return_volremaining == 1:
# Ensure volume by end of century is zero
# Compute glacier volume change for every time step and use this to compute mass balance
glac_wide_area = glac_wide_area_annual[:-1].repeat(12)
# Mass change [km3 mwe]
# mb [mwea] * (1 km / 1000 m) * area [km2]
glac_wide_masschange = glac_wide_massbaltotal / 1000 * glac_wide_area
# Mean annual mass balance [mwea]
mb_mwea = glac_wide_masschange[t1_idx:t2_idx+1].sum() / glac_wide_area[0] * 1000 / (t2 - t1)
t2_yearidx = int(np.ceil(t2 - startyear_decimal))
return mb_mwea, glac_wide_volume_annual[t2_yearidx]
# Return mass balance
else:
# Compute glacier volume change for every time step and use this to compute mass balance
glac_wide_area = glac_wide_area_annual[:-1].repeat(12)
# Mass change [km3 mwe]
# mb [mwea] * (1 km / 1000 m) * area [km2]
glac_wide_masschange = glac_wide_massbaltotal / 1000 * glac_wide_area
# Mean annual mass balance [mwea]
mb_mwea = glac_wide_masschange[t1_idx:t2_idx+1].sum() / glac_wide_area[0] * 1000 / (t2 - t1)
return mb_mwea
def retrieve_priors(modelparameters, glacier_rgi_table, glacier_area_initial, icethickness_initial, width_initial,
elev_bins, glacier_gcm_temp, glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev,
glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table, t1_idx, t2_idx, t1, t2, debug=False):
"""
Calculate parameters for prior distributions for the MCMC analysis
Parameters
----------
modelparameters : np.array
glacier model parameters
glacier_rgi_table : pd.DataFrame
table of RGI information for a particular glacier
glacier_area_initial, icethickness_initial, width_initial, elev_bins : np.arrays
relevant glacier properties data
glacier_gcm_temp, glacier_gcm_prec, glacier_gcm_elev, glacier_gcm_lrgcm, glacier_gcm_lrglac : np.arrays
relevant glacier climate data
dates_table : pd.DataFrame
table of date/time information
observed_massbal, mb_obs_min, mb_obs_max, t1, t2, t1_idx, t2_idx: floats (all except _idx) and integers (_idx)
values related to the mass balance observations and their proper date/time indices
debug : boolean
switch to debug the function or not (default=False)
return_tc_mustmelt : integer
switch to return the mass balance (default, 0) or number of years lowermost bin has negative mass balance (1)
Returns
-------
    tbias_bndlow, tbias_bndhigh : floats
        lower and upper bounds for the temperature bias
    mb_max_loss : float
        mass balance [mwea] associated with maximum loss (complete loss of the glacier over the calibration period)
"""
# ----- TEMPBIAS: max accumulation -----
# Lower temperature bound based on max positive mass balance adjusted to avoid edge effects
# Temperature at the lowest bin
# T_bin = T_gcm + lr_gcm * (z_ref - z_gcm) + lr_glac * (z_bin - z_ref) + tbias
lowest_bin = np.where(glacier_area_initial > 0)[0][0]
tbias_max_acc = (-1 * (glacier_gcm_temp + glacier_gcm_lrgcm *
(elev_bins[lowest_bin] - glacier_gcm_elev)).max())
if debug:
print('tc_max_acc:', np.round(tbias_max_acc,2))
# ----- TEMPBIAS: UPPER BOUND -----
# MAXIMUM LOSS - AREA EVOLVING
# note: the mb_mwea_calc function ensures the area is constant until t1 such that the glacier is not completely
# lost before t1; otherwise, this will fail at high TC values
mb_max_loss = (-1 * (glacier_area_initial * icethickness_initial).sum() / glacier_area_initial.sum() *
pygem_prms.density_ice / pygem_prms.density_water / (t2 - t1))
if debug:
print('mb_max_loss:', np.round(mb_max_loss,2), 'kp:', np.round(modelparameters[2],2))
# Looping forward and backward to ensure optimization does not get stuck
modelparameters[7] = tbias_max_acc
mb_mwea_1, vol_remaining = mb_mwea_calc(
modelparameters, glacier_rgi_table, glacier_area_initial, icethickness_initial, width_initial, elev_bins,
glacier_gcm_temp, glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev, glacier_gcm_lrgcm,
glacier_gcm_lrglac, dates_table, t1_idx, t2_idx, t1, t2,
option_areaconstant=0, return_volremaining=1)
# use absolute value because with area evolving the maximum value is a limit
while vol_remaining > 0:
modelparameters[7] = modelparameters[7] + 1
mb_mwea_1, vol_remaining = mb_mwea_calc(
modelparameters, glacier_rgi_table, glacier_area_initial, icethickness_initial, width_initial,
elev_bins, glacier_gcm_temp, glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev, glacier_gcm_lrgcm,
glacier_gcm_lrglac, dates_table, t1_idx, t2_idx, t1, t2,
option_areaconstant=0, return_volremaining=1)
if debug:
print('mb_mwea_1:', np.round(mb_mwea_1,2), 'TC:', np.round(modelparameters[7],2),
'mb_max_loss:', np.round(mb_max_loss,2), 'vol_left:', np.round(vol_remaining,4))
# Looping backward for tbias at max loss
while vol_remaining == 0:
modelparameters[7] = modelparameters[7] - 0.05
mb_mwea_1, vol_remaining = mb_mwea_calc(
modelparameters, glacier_rgi_table, glacier_area_initial, icethickness_initial, width_initial,
elev_bins, glacier_gcm_temp, glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev, glacier_gcm_lrgcm,
glacier_gcm_lrglac, dates_table, t1_idx, t2_idx, t1, t2,
option_areaconstant=0, return_volremaining=1)
if debug:
print('vol_left:', np.round(vol_remaining,4), 'mb_mwea_1:', np.round(mb_mwea_1,2),
'TC:', np.round(modelparameters[7],2))
tbias_max_loss = modelparameters[7]
tbias_bndhigh = tbias_max_loss
if debug:
print('tc_max_loss:', np.round(tbias_max_loss,2), 'mb_max_loss:', np.round(mb_max_loss,2))
# Lower bound based on must melt condition
# note: since the mass balance ablation is conditional on the glacier evolution, there can be cases where higher
# temperature biases still have 0 for nyears_negmbclim. Hence, the need to loop beyond the first instance, and
# then go back and check that you're using the good cases from there onward. This ensures starting point is good
modelparameters[7] = tbias_max_acc
nyears_negmbclim = mb_mwea_calc(
modelparameters, glacier_rgi_table, glacier_area_initial, icethickness_initial, width_initial,
elev_bins, glacier_gcm_temp, glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev, glacier_gcm_lrgcm,
glacier_gcm_lrglac, dates_table, t1_idx, t2_idx, t1, t2,
option_areaconstant=0, return_tc_mustmelt=1)
nyears_negmbclim_list = [nyears_negmbclim]
tc_negmbclim_list = [modelparameters[7]]
tc_smallstep_switch = False
while nyears_negmbclim < 10 and modelparameters[7] < tbias_max_loss:
# Switch from large to small step sizes to speed up calculations
if tc_smallstep_switch == False:
tc_stepsize = 1
else:
tc_stepsize = 0.05
modelparameters_old = modelparameters[7]
modelparameters[7] += tc_stepsize
nyears_negmbclim = mb_mwea_calc(
modelparameters, glacier_rgi_table, glacier_area_initial, icethickness_initial, width_initial,
elev_bins, glacier_gcm_temp, glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev, glacier_gcm_lrgcm,
glacier_gcm_lrglac, dates_table, t1_idx, t2_idx, t1, t2,
option_areaconstant=0, return_tc_mustmelt=1)
        # Record the step if using a big step and there is no melt, or if using a small step and there is melt
if nyears_negmbclim == 0 or (nyears_negmbclim > 0 and tc_smallstep_switch == True):
nyears_negmbclim_list.append(nyears_negmbclim)
tc_negmbclim_list.append(modelparameters[7])
        # First time nyears_negmbclim is > 0, flip the switch to use small steps and restart from the last tbias
if nyears_negmbclim > 0 and tc_smallstep_switch == False:
tc_smallstep_switch = True
modelparameters[7] = modelparameters_old
nyears_negmbclim = 0
if debug:
print('TC:', np.round(modelparameters[7],2), 'nyears_negmbclim:', nyears_negmbclim)
tbias_bndlow = tc_negmbclim_list[np.where(np.array(nyears_negmbclim_list) == 0)[0][-1] + 1]
return tbias_bndlow, tbias_bndhigh, mb_max_loss
def main(list_packed_vars):
"""
Model calibration
Parameters
----------
list_packed_vars : list
list of packed variables that enable the use of parallels
Returns
-------
netcdf files of the calibration output
Depending on the calibration scheme additional output may be exported as well
"""
# Unpack variables
count = list_packed_vars[0]
gcm_name = list_packed_vars[1]
main_glac_rgi = list_packed_vars[2]
main_glac_hyps = list_packed_vars[3]
main_glac_icethickness = list_packed_vars[4]
main_glac_width = list_packed_vars[5]
gcm_temp = list_packed_vars[6]
gcm_tempstd = list_packed_vars[7]
gcm_prec = list_packed_vars[8]
gcm_elev = list_packed_vars[9]
gcm_lr = list_packed_vars[10]
cal_data = list_packed_vars[11]
time_start = time.time()
parser = getparser()
args = parser.parse_args()
if args.debug == 1:
debug = True
else:
debug = False
# ===== CALIBRATION =====
# Option 2: use MCMC method to determine posterior probability distributions of the three parameters tbias,
# ddfsnow and kp. Then create an ensemble of parameter sets evenly sampled from these
# distributions, and output these sets of parameters and their corresponding mass balances to be used in
# the simulations.
if pygem_prms.option_calibration == 'MCMC':
# ===== Define functions needed for MCMC method =====
def run_MCMC(kp_disttype=pygem_prms.kp_disttype,
kp_gamma_alpha=pygem_prms.kp_gamma_alpha,
kp_gamma_beta=pygem_prms.kp_gamma_beta,
kp_lognorm_mu=pygem_prms.kp_lognorm_mu,
kp_lognorm_tau=pygem_prms.kp_lognorm_tau,
kp_mu=pygem_prms.kp_mu, kp_sigma=pygem_prms.kp_sigma,
kp_bndlow=pygem_prms.kp_bndlow, kp_bndhigh=pygem_prms.kp_bndhigh,
kp_start=pygem_prms.kp_start,
tbias_disttype=pygem_prms.tbias_disttype,
tbias_mu=pygem_prms.tbias_mu, tbias_sigma=pygem_prms.tbias_sigma,
tbias_bndlow=pygem_prms.tbias_bndlow, tbias_bndhigh=pygem_prms.tbias_bndhigh,
tbias_start=pygem_prms.tbias_start,
ddfsnow_disttype=pygem_prms.ddfsnow_disttype,
ddfsnow_mu=pygem_prms.ddfsnow_mu, ddfsnow_sigma=pygem_prms.ddfsnow_sigma,
ddfsnow_bndlow=pygem_prms.ddfsnow_bndlow, ddfsnow_bndhigh=pygem_prms.ddfsnow_bndhigh,
ddfsnow_start=pygem_prms.ddfsnow_start,
iterations=10, burn=0, thin=pygem_prms.thin_interval, tune_interval=1000, step=None,
tune_throughout=True, save_interval=None, burn_till_tuned=False, stop_tuning_after=5, verbose=0,
progress_bar=args.progress_bar, dbname=None,
use_potentials=1, mb_max_loss=None
):
"""
Runs the MCMC algorithm.
Runs the MCMC algorithm by setting the prior distributions and calibrating the probability distributions of
three parameters for the mass balance function.
Parameters
----------
kp_disttype : str
Distribution type of precipitation factor (either 'lognormal', 'uniform', or 'custom')
kp_lognorm_mu : float
Lognormal mean of precipitation factor (default assigned from input)
kp_lognorm_tau : float
Lognormal tau (1/variance) of precipitation factor (default assigned from input)
kp_mu : float
Mean of precipitation factor (default assigned from input)
kp_sigma : float
Standard deviation of precipitation factor (default assigned from input)
kp_bndlow : float
Lower boundary of precipitation factor (default assigned from input)
kp_bndhigh : float
Upper boundary of precipitation factor (default assigned from input)
kp_start : float
Starting value of precipitation factor for sampling iterations (default assigned from input)
tbias_disttype : str
Distribution type of tbias (either 'truncnormal' or 'uniform')
tbias_mu : float
Mean of temperature change (default assigned from input)
tbias_sigma : float
Standard deviation of temperature change (default assigned from input)
tbias_bndlow : float
Lower boundary of temperature change (default assigned from input)
tbias_bndhigh: float
Upper boundary of temperature change (default assigned from input)
tbias_start : float
Starting value of temperature change for sampling iterations (default assigned from input)
ddfsnow_disttype : str
Distribution type of degree day factor of snow (either 'truncnormal' or 'uniform')
ddfsnow_mu : float
Mean of degree day factor of snow (default assigned from input)
ddfsnow_sigma : float
Standard deviation of degree day factor of snow (default assigned from input)
ddfsnow_bndlow : float
Lower boundary of degree day factor of snow (default assigned from input)
ddfsnow_bndhigh : float
Upper boundary of degree day factor of snow (default assigned from input)
ddfsnow_start : float
Starting value of degree day factor of snow for sampling iterations (default assigned from input)
iterations : int
Total number of iterations to do (default 10).
burn : int
Variables will not be tallied until this many iterations are complete (default 0).
thin : int
Variables will be tallied at intervals of this many iterations (default 1).
tune_interval : int
Step methods will be tuned at intervals of this many iterations (default 1000).
step : str
Choice of step method to use (default metropolis-hastings).
tune_throughout : boolean
If true, tuning will continue after the burnin period; otherwise tuning will halt at the end of the
burnin period (default True).
save_interval : int or None
If given, the model state will be saved at intervals of this many iterations (default None).
burn_till_tuned: boolean
If True the Sampler will burn samples until all step methods are tuned. A tuned step methods is one
that was not tuned for the last `stop_tuning_after` tuning intervals. The burn-in phase will have a
minimum of 'burn' iterations but could be longer if tuning is needed. After the phase is done the
sampler will run for another (iter - burn) iterations, and will tally the samples according to the
'thin' argument. This means that the total number of iteration is updated throughout the sampling
                procedure. If True, it also overrides the tune_throughout argument, so no step method will be tuned
                when samples are being tallied (default False).
stop_tuning_after: int
The number of untuned successive tuning interval needed to be reached in order for the burn-in phase to
be done (if burn_till_tuned is True) (default 5).
verbose : int
An integer controlling the verbosity of the models output for debugging (default 0).
progress_bar : boolean
Display progress bar while sampling (default True).
dbname : str
                Choice of database name the samples should be saved to (default None).
use_potentials : int
                Switch to turn off (0) or on (1) the use of potential functions to further constrain the likelihood functions
mb_max_loss : float
Mass balance [mwea] at which the glacier completely melts
Returns
-------
pymc.MCMC.MCMC
Returns a model that contains sample traces of tbias, ddfsnow, kp and massbalance. These
samples can be accessed by calling the trace attribute. For example:
model.trace('ddfsnow')[:]
gives the trace of ddfsnow values.
                A trace, or Markov Chain, is an array of values output by the MCMC simulation which defines the
posterior probability distribution of the variable at hand.
"""
# ===== PRIOR DISTRIBUTIONS =====
# Precipitation factor [-]
if kp_disttype == 'gamma':
kp = pymc.Gamma('kp', alpha=kp_gamma_alpha, beta=kp_gamma_beta,
value=kp_start)
elif kp_disttype =='lognormal':
# lognormal distribution (roughly 0.3 to 3)
kp_start = np.exp(kp_start)
kp = pymc.Lognormal('kp', mu=kp_lognorm_mu, tau=kp_lognorm_tau,
value=kp_start)
elif kp_disttype == 'uniform':
kp = pymc.Uniform('kp', lower=kp_bndlow, upper=kp_bndhigh,
value=kp_start)
# Temperature change [degC]
if tbias_disttype == 'normal':
tbias = pymc.Normal('tbias', mu=tbias_mu, tau=1/(tbias_sigma**2),
value=tbias_start)
elif tbias_disttype =='truncnormal':
tbias = pymc.TruncatedNormal('tbias', mu=tbias_mu, tau=1/(tbias_sigma**2),
a=tbias_bndlow, b=tbias_bndhigh, value=tbias_start)
elif tbias_disttype =='uniform':
tbias = pymc.Uniform('tbias', lower=tbias_bndlow, upper=tbias_bndhigh,
value=tbias_start)
# Degree day factor of snow [mwe degC-1 d-1]
# always truncated normal distribution with mean 0.0041 mwe degC-1 d-1 and standard deviation of 0.0015
# (Braithwaite, 2008), since it's based on data; uniform should only be used for testing
if ddfsnow_disttype == 'truncnormal':
ddfsnow = pymc.TruncatedNormal('ddfsnow', mu=ddfsnow_mu, tau=1/(ddfsnow_sigma**2), a=ddfsnow_bndlow,
b=ddfsnow_bndhigh, value=ddfsnow_start)
if ddfsnow_disttype == 'uniform':
ddfsnow = pymc.Uniform('ddfsnow', lower=ddfsnow_bndlow, upper=ddfsnow_bndhigh,
value=ddfsnow_start)
# ===== DETERMINISTIC FUNCTION ====
            # Define the deterministic function for the MCMC model based on our a priori probability distributions.
@deterministic(plot=False)
def massbal(tbias=tbias, kp=kp, ddfsnow=ddfsnow):
"""
                Deterministic mass balance [mwea] computed from the model parameters
"""
modelparameters_copy = modelparameters.copy()
if tbias is not None:
modelparameters_copy[7] = float(tbias)
if kp is not None:
modelparameters_copy[2] = float(kp)
if ddfsnow is not None:
modelparameters_copy[4] = float(ddfsnow)
# Degree day factor of ice is proportional to ddfsnow
modelparameters_copy[5] = modelparameters_copy[4] / pygem_prms.ddfsnow_iceratio
# Mass balance calculations
(glac_bin_temp, glac_bin_prec, glac_bin_acc, glac_bin_refreeze, glac_bin_snowpack, glac_bin_melt,
glac_bin_frontalablation, glac_bin_massbalclim, glac_bin_massbalclim_annual, glac_bin_area_annual,
glac_bin_icethickness_annual, glac_bin_width_annual, glac_bin_surfacetype_annual,
glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline, glac_wide_snowpack,
glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual, offglac_wide_prec,
offglac_wide_refreeze, offglac_wide_melt, offglac_wide_snowpack, offglac_wide_runoff) = (
massbalance.runmassbalance(modelparameters_copy, glacier_rgi_table, glacier_area_initial,
icethickness_initial, width_initial, elev_bins, glacier_gcm_temp,
glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev,
glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table,
option_areaconstant=0))
# Compute glacier volume change for every time step and use this to compute mass balance
glac_wide_area = glac_wide_area_annual[:-1].repeat(12)
# Mass change [km3 mwe]
# mb [mwea] * (1 km / 1000 m) * area [km2]
glac_wide_masschange = glac_wide_massbaltotal / 1000 * glac_wide_area
# Mean annual mass balance [mwea]
mb_mwea = glac_wide_masschange[t1_idx:t2_idx+1].sum() / glac_wide_area[0] * 1000 / (t2 - t1)
return mb_mwea
# ===== POTENTIAL FUNCTION =====
            # Potential functions are used to impose additional constraints on the model
@pymc.potential
def mb_max(mb_max_loss=mb_max_loss, massbal=massbal):
"""Model parameters cannot completely melt the glacier, i.e., reject any parameter set within 0.01 mwea
of completely melting the glacier"""
if massbal < mb_max_loss:
return -np.inf
else:
return 0
@pymc.potential
def must_melt(tbias=tbias, kp=kp, ddfsnow=ddfsnow):
"""
                Potential function requiring the lowermost bin to have a negative climatic mass balance in at least one year
"""
modelparameters_copy = modelparameters.copy()
if tbias is not None:
modelparameters_copy[7] = float(tbias)
if kp is not None:
modelparameters_copy[2] = float(kp)
if ddfsnow is not None:
modelparameters_copy[4] = float(ddfsnow)
# Degree day factor of ice is proportional to ddfsnow
modelparameters_copy[5] = modelparameters_copy[4] / pygem_prms.ddfsnow_iceratio
# Mass balance calculations
(glac_bin_temp, glac_bin_prec, glac_bin_acc, glac_bin_refreeze, glac_bin_snowpack, glac_bin_melt,
glac_bin_frontalablation, glac_bin_massbalclim, glac_bin_massbalclim_annual, glac_bin_area_annual,
glac_bin_icethickness_annual, glac_bin_width_annual, glac_bin_surfacetype_annual,
glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline, glac_wide_snowpack,
glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual, offglac_wide_prec,
offglac_wide_refreeze, offglac_wide_melt, offglac_wide_snowpack, offglac_wide_runoff) = (
massbalance.runmassbalance(modelparameters_copy, glacier_rgi_table, glacier_area_initial,
icethickness_initial, width_initial, elev_bins, glacier_gcm_temp,
glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev,
glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table,
option_areaconstant=0))
# Climatic mass balance of lowermost bin must be negative at some point
glac_idx = np.where(glac_bin_area_annual > 0)[0][0]
lower_massbalclim_annual = glac_bin_massbalclim_annual[glac_idx,:].tolist()
# Number of years with negative climatic mass balance
nyears_negmbclim = np.sum([1 if x < 0 else 0 for x in lower_massbalclim_annual])
if nyears_negmbclim > 0:
return 0
else:
return -np.inf
# ===== OBSERVED DATA =====
# Observed data defines the observed likelihood of mass balances (based on geodetic observations)
obs_massbal = pymc.Normal('obs_massbal', mu=massbal, tau=(1/(observed_error**2)),
value=float(observed_massbal), observed=True)
# Set model
if use_potentials == 1:
model = pymc.MCMC([{'kp':kp, 'tbias':tbias, 'ddfsnow':ddfsnow,
'massbal':massbal, 'obs_massbal':obs_massbal}, mb_max, must_melt])
else:
model = pymc.MCMC({'kp':kp, 'tbias':tbias, 'ddfsnow':ddfsnow,
'massbal':massbal, 'obs_massbal':obs_massbal})
# if dbname is not None:
# model = pymc.MCMC({'kp':kp, 'tbias':tbias, 'ddfsnow':ddfsnow,
# 'massbal':massbal, 'obs_massbal':obs_massbal}, db='pickle', dbname=dbname)
# Step method (if changed from default)
# Adaptive metropolis is supposed to perform block update, i.e., update all model parameters together based
            # on their covariance, which would reduce autocorrelation; however, tests show it doesn't make a difference.
if step == 'am':
model.use_step_method(pymc.AdaptiveMetropolis, [kp, tbias, ddfsnow], delay = 1000)
# Sample
if args.progress_bar == 1:
progress_bar_switch = True
else:
progress_bar_switch = False
model.sample(iter=iterations, burn=burn, thin=thin,
tune_interval=tune_interval, tune_throughout=tune_throughout,
save_interval=save_interval, verbose=verbose, progress_bar=progress_bar_switch)
# Close database
model.db.close()
return model
#%%
# ===== Begin MCMC process =====
# loop through each glacier selected
for glac in range(main_glac_rgi.shape[0]):
# if debug:
print(count, main_glac_rgi.loc[main_glac_rgi.index.values[glac],'RGIId_float'])
# Set model parameters
modelparameters = [pygem_prms.lrgcm, pygem_prms.lrglac, pygem_prms.kp, pygem_prms.precgrad, pygem_prms.ddfsnow, pygem_prms.ddfice,
pygem_prms.tsnow_threshold, pygem_prms.tbias]
# Select subsets of data
glacier_rgi_table = main_glac_rgi.loc[main_glac_rgi.index.values[glac], :]
glacier_gcm_elev = gcm_elev[glac]
glacier_gcm_prec = gcm_prec[glac,:]
glacier_gcm_temp = gcm_temp[glac,:]
glacier_gcm_tempstd = gcm_tempstd[glac,:]
glacier_gcm_lrgcm = gcm_lr[glac,:]
glacier_gcm_lrglac = glacier_gcm_lrgcm.copy()
glacier_area_initial = main_glac_hyps.iloc[glac,:].values.astype(float)
icethickness_initial = main_glac_icethickness.iloc[glac,:].values.astype(float)
width_initial = main_glac_width.iloc[glac,:].values.astype(float)
glacier_cal_data = ((cal_data.iloc[np.where(
glacier_rgi_table['rgino_str'] == cal_data['glacno'])[0],:]).copy())
glacier_str = '{0:0.5f}'.format(glacier_rgi_table['RGIId_float'])
# Select observed mass balance, error, and time data
cal_idx = glacier_cal_data.index.values[0]
# Note: index to main_glac_rgi may differ from cal_idx
t1 = glacier_cal_data.loc[cal_idx, 't1']
t2 = glacier_cal_data.loc[cal_idx, 't2']
t1_idx = int(glacier_cal_data.loc[cal_idx,'t1_idx'])
t2_idx = int(glacier_cal_data.loc[cal_idx,'t2_idx'])
# Observed mass balance [mwea]
observed_massbal = glacier_cal_data.loc[cal_idx,'mb_mwe'] / (t2 - t1)
observed_error = glacier_cal_data.loc[cal_idx,'mb_mwe_err'] / (t2 - t1)
if debug:
print('observed_massbal:', np.round(observed_massbal,2), 'observed_error:',np.round(observed_error,2))
# ===== RUN MARKOV CHAIN MONTE CARLO METHOD ====================
if icethickness_initial.max() > 0:
# Regional priors
kp_gamma_alpha = pygem_prms.kp_gamma_region_dict[glacier_rgi_table.loc['region']][0]
kp_gamma_beta = pygem_prms.kp_gamma_region_dict[glacier_rgi_table.loc['region']][1]
tbias_mu = pygem_prms.tbias_norm_region_dict[glacier_rgi_table.loc['region']][0]
tbias_sigma = pygem_prms.tbias_norm_region_dict[glacier_rgi_table.loc['region']][1]
# fit the MCMC model
for n_chain in range(0,pygem_prms.n_chains):
if debug:
print('\n', glacier_str, ' chain' + str(n_chain))
if n_chain == 0:
# Starting values: middle
tbias_start = tbias_mu
kp_start = kp_gamma_alpha / kp_gamma_beta
ddfsnow_start = pygem_prms.ddfsnow_mu
elif n_chain == 1:
# Starting values: lowest
tbias_start = tbias_mu - 1.96 * tbias_sigma
ddfsnow_start = pygem_prms.ddfsnow_mu - 1.96 * pygem_prms.ddfsnow_sigma
kp_start = stats.gamma.ppf(0.05,kp_gamma_alpha, scale=1/kp_gamma_beta)
elif n_chain == 2:
# Starting values: high
tbias_start = tbias_mu + 1.96 * tbias_sigma
ddfsnow_start = pygem_prms.ddfsnow_mu + 1.96 * pygem_prms.ddfsnow_sigma
kp_start = stats.gamma.ppf(0.95,kp_gamma_alpha, scale=1/kp_gamma_beta)
# Determine bounds to check TC starting values and estimate maximum mass loss
modelparameters[2] = kp_start
modelparameters[4] = ddfsnow_start
modelparameters[5] = ddfsnow_start / pygem_prms.ddfsnow_iceratio
tbias_bndlow, tbias_bndhigh, mb_max_loss = (
retrieve_priors(modelparameters, glacier_rgi_table, glacier_area_initial, icethickness_initial,
width_initial, elev_bins, glacier_gcm_temp, glacier_gcm_tempstd,
glacier_gcm_prec, glacier_gcm_elev, glacier_gcm_lrgcm, glacier_gcm_lrglac,
dates_table, t1_idx, t2_idx, t1, t2, debug=False))
if debug:
print('\nTC_low:', np.round(tbias_bndlow,2),
'TC_high:', np.round(tbias_bndhigh,2),
'mb_max_loss:', np.round(mb_max_loss,2))
# Check that tbias mu and sigma is somewhat within bndhigh and bndlow
if tbias_start > tbias_bndhigh:
tbias_start = tbias_bndhigh
elif tbias_start < tbias_bndlow:
tbias_start = tbias_bndlow
# # Check that tbias mu and sigma is somewhat within bndhigh and bndlow
# if ((tbias_bndhigh < tbias_mu - 3 * tbias_sigma) or
# (tbias_bndlow > tbias_mu + 3 * tbias_sigma)):
# tbias_mu = np.mean([tbias_bndlow, tbias_bndhigh])
# tbias_sigma = (tbias_bndhigh - tbias_bndlow) / 6
if debug:
print('\ntc_start:', np.round(tbias_start,3),
'\npf_start:', np.round(kp_start,3),
'\nddf_start:', np.round(ddfsnow_start,4))
model = run_MCMC(iterations=pygem_prms.mcmc_sample_no, burn=pygem_prms.mcmc_burn_no, step=pygem_prms.mcmc_step,
kp_gamma_alpha=kp_gamma_alpha,
kp_gamma_beta=kp_gamma_beta,
kp_start=kp_start,
tbias_mu=tbias_mu, tbias_sigma=tbias_sigma,
tbias_start=tbias_start,
ddfsnow_start=ddfsnow_start,
mb_max_loss=mb_max_loss)
if debug:
print('\nacceptance ratio:', model.step_method_dict[next(iter(model.stochastics))][0].ratio)
# Select data from model to be stored in netcdf
df = pd.DataFrame({'tbias': model.trace('tbias')[:],
'kp': model.trace('kp')[:],
'ddfsnow': model.trace('ddfsnow')[:],
'massbal': model.trace('massbal')[:]})
# set columns for other variables
df['ddfice'] = df['ddfsnow'] / pygem_prms.ddfsnow_iceratio
df['lrgcm'] = np.full(df.shape[0], pygem_prms.lrgcm)
df['lrglac'] = np.full(df.shape[0], pygem_prms.lrglac)
df['precgrad'] = np.full(df.shape[0], pygem_prms.precgrad)
df['tsnow_threshold'] = np.full(df.shape[0], pygem_prms.tsnow_threshold)
if debug:
print('mb_mwea:', np.round(df.massbal.mean(),2), 'mb_mwea_std:', np.round(df.massbal.std(),2))
if n_chain == 0:
df_chains = df.values[:, :, np.newaxis]
else:
df_chains = np.dstack((df_chains, df.values))
ds = xr.Dataset({'mp_value': (('iter', 'mp', 'chain'), df_chains),
},
coords={'iter': df.index.values,
'mp': df.columns.values,
'chain': np.arange(0,n_chain+1),
})
if not os.path.exists(pygem_prms.output_fp_cal):
os.makedirs(pygem_prms.output_fp_cal)
ds.to_netcdf(pygem_prms.output_fp_cal + glacier_str + '.nc')
ds.close()
# #%%
# # Example of accessing netcdf file and putting it back into pandas dataframe
# ds = xr.open_dataset(pygem_prms.output_fp_cal + '13.00014.nc')
# df = pd.DataFrame(ds['mp_value'].sel(chain=0).values, columns=ds.mp.values)
# priors = pd.Series(ds.priors, index=ds.prior_cns)
# #%%
# ==============================================================
#%%
# Huss and Hock (2015) model calibration steps
elif pygem_prms.option_calibration == 'HH2015':
def objective(modelparameters_subset):
"""
Objective function for mass balance data.
Parameters
----------
modelparameters_subset : np.float64
List of model parameters to calibrate
[precipitation factor, precipitation gradient, degree-day factor of snow, temperature bias]
Returns
-------
            mb_dif_mwea_abs
                Absolute difference between modeled and observed mass balance [mwea]
"""
# Use a subset of model parameters to reduce number of constraints required
modelparameters[2] = modelparameters_subset[0]
modelparameters[3] = modelparameters_subset[1]
modelparameters[4] = modelparameters_subset[2]
modelparameters[5] = modelparameters[4] / ddfsnow_iceratio
modelparameters[7] = modelparameters_subset[3]
# Mass balance calculations
(glac_bin_temp, glac_bin_prec, glac_bin_acc, glac_bin_refreeze, glac_bin_snowpack, glac_bin_melt,
glac_bin_frontalablation, glac_bin_massbalclim, glac_bin_massbalclim_annual, glac_bin_area_annual,
glac_bin_icethickness_annual, glac_bin_width_annual, glac_bin_surfacetype_annual,
glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline, glac_wide_snowpack,
glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual, offglac_wide_prec,
offglac_wide_refreeze, offglac_wide_melt, offglac_wide_snowpack, offglac_wide_runoff) = (
massbalance.runmassbalance(modelparameters, glacier_rgi_table, glacier_area_initial,
icethickness_initial, width_initial, elev_bins, glacier_gcm_temp,
glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev, glacier_gcm_lrgcm,
glacier_gcm_lrglac, dates_table, option_areaconstant=1))
# Compute glacier volume change for every time step and use this to compute mass balance
glac_wide_area = glac_wide_area_annual[:-1].repeat(12)
# Mass change [km3 mwe]
# mb [mwea] * (1 km / 1000 m) * area [km2]
glac_wide_masschange = glac_wide_massbaltotal[t1_idx:t2_idx+1] / 1000 * glac_wide_area[t1_idx:t2_idx+1]
# Mean annual mass balance [mwea]
mb_mwea = (glac_wide_masschange.sum() / glac_wide_area[0] * 1000 /
(glac_wide_masschange.shape[0] / 12))
            # Difference [mwea] = |observed mass balance - modeled mass balance|
mb_dif_mwea_abs = abs(observed_massbal - mb_mwea)
# print('Obs[mwea]:', np.round(observed_massbal,2), 'Model[mwea]:', np.round(mb_mwea,2))
# print('Dif[mwea]:', np.round(mb_dif_mwea,2))
return mb_dif_mwea_abs
def run_objective(modelparameters_init, observed_massbal, kp_bnds=(0.33,3), tbias_bnds=(-10,10),
ddfsnow_bnds=(0.0026,0.0056), precgrad_bnds=(0.0001,0.0001), run_opt=True):
"""
Run the optimization for the single glacier objective function.
Parameters
----------
modelparams_init : list
List of model parameters to calibrate
[precipitation factor, precipitation gradient, degree day factor of snow, temperature change]
glacier_cal_data : pd.DataFrame
Table containing calibration data for a single glacier
kp_bnds : tuple
Lower and upper bounds for precipitation factor (default is (0.33, 3))
tbias_bnds : tuple
                Lower and upper bounds for temperature bias (default is (-10, 10))
ddfsnow_bnds : tuple
Lower and upper bounds for degree day factor of snow (default is (0.0026, 0.0056))
precgrad_bnds : tuple
Lower and upper bounds for precipitation gradient (default is constant (0.0001,0.0001))
run_opt : boolean
Boolean statement allowing one to bypass the optimization and run through with initial parameters
(default is True - run the optimization)
Returns
-------
            modelparams : list
                Full set of model parameters with the optimized values inserted
            mb_mwea : float
                Modeled mass balance [mwea] for the optimized parameters
"""
# Bounds
modelparameters_bnds = (kp_bnds, precgrad_bnds, ddfsnow_bnds, tbias_bnds)
# Run the optimization
# 'L-BFGS-B' - much slower
# 'SLSQP' did not work for some geodetic measurements using the sum_abs_zscore. One work around was to
# divide the sum_abs_zscore by 1000, which made it work in all cases. However, methods were switched
# to 'L-BFGS-B', which may be slower, but is still effective.
# note: switch enables running through with given parameters
if run_opt:
modelparameters_opt = minimize(objective, modelparameters_init, method=pygem_prms.method_opt,
bounds=modelparameters_bnds, options={'ftol':pygem_prms.ftol_opt})
# Record the optimized parameters
modelparameters_subset = modelparameters_opt.x
else:
modelparameters_subset = modelparameters_init.copy()
modelparams = (
[modelparameters[0], modelparameters[1], modelparameters_subset[0], modelparameters_subset[1],
modelparameters_subset[2], modelparameters_subset[2] / ddfsnow_iceratio, modelparameters[6],
modelparameters_subset[3]])
# Re-run the optimized parameters in order to see the mass balance
# Mass balance calculations
(glac_bin_temp, glac_bin_prec, glac_bin_acc, glac_bin_refreeze, glac_bin_snowpack, glac_bin_melt,
glac_bin_frontalablation, glac_bin_massbalclim, glac_bin_massbalclim_annual, glac_bin_area_annual,
glac_bin_icethickness_annual, glac_bin_width_annual, glac_bin_surfacetype_annual,
glac_wide_massbaltotal, glac_wide_runoff, glac_wide_snowline, glac_wide_snowpack,
glac_wide_area_annual, glac_wide_volume_annual, glac_wide_ELA_annual, offglac_wide_prec,
offglac_wide_refreeze, offglac_wide_melt, offglac_wide_snowpack, offglac_wide_runoff) = (
massbalance.runmassbalance(modelparams, glacier_rgi_table, glacier_area_initial, icethickness_initial,
width_initial, elev_bins, glacier_gcm_temp, glacier_gcm_tempstd, glacier_gcm_prec,
glacier_gcm_elev, glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table,
option_areaconstant=1))
# Compute glacier volume change for every time step and use this to compute mass balance
glac_wide_area = glac_wide_area_annual[:-1].repeat(12)
# Mass change [km3 mwe]
# mb [mwea] * (1 km / 1000 m) * area [km2]
glac_wide_masschange = glac_wide_massbaltotal[t1_idx:t2_idx+1] / 1000 * glac_wide_area[t1_idx:t2_idx+1]
# Mean annual mass balance [mwea]
mb_mwea = (glac_wide_masschange.sum() / glac_wide_area[0] * 1000 /
(glac_wide_masschange.shape[0] / 12))
            # Difference [mwea] = Observed mass balance [mwea] - mb_mwea
# mb_dif_mwea = observed_massbal - mb_mwea
#
# print('Obs[mwea]:', np.round(observed_massbal,2), 'Model[mwea]:', np.round(mb_mwea,2))
# print('Dif[mwea]:', np.round(mb_dif_mwea,2))
return modelparams, mb_mwea
def write_netcdf_modelparams(output_fullfn, modelparameters, mb_mwea, observed_massbal):
"""
Export glacier model parameters and modeled observations to netcdf file.
Parameters
----------
output_fullfn : str
Full filename (path included) of the netcdf to be exported
modelparams : list
model parameters
mb_mwea : float
modeled mass balance for given parameters
Returns
-------
None
Exports file to netcdf
"""
# Select data from model to be stored in netcdf
df = pd.DataFrame(index=[0])
df['lrgcm'] = np.full(df.shape[0], pygem_prms.lrgcm)
df['lrglac'] = np.full(df.shape[0], pygem_prms.lrglac)
df['kp'] = modelparameters[2]
df['precgrad'] = np.full(df.shape[0], pygem_prms.precgrad)
df['ddfsnow'] = modelparameters[4]
df['ddfice'] = df['ddfsnow'] / ddfsnow_iceratio
df['tsnow_threshold'] = np.full(df.shape[0], pygem_prms.tsnow_threshold)
df['tbias'] = modelparameters[7]
df['mb_mwea'] = mb_mwea
df['obs_mwea'] = observed_massbal
df['dif_mwea'] = mb_mwea - observed_massbal
df_export = df.values[:, :, np.newaxis]
# Set up dataset and export to netcdf
ds = xr.Dataset({'mp_value': (('iter', 'mp', 'chain'), df_export)},
coords={'iter': df.index.values,
'mp': df.columns.values,
'chain': [0]})
ds.to_netcdf(output_fullfn)
ds.close()
        # Huss and Hock (2015) parameters and bounds
tbias_init = 0
tbias_bndlow = -10
tbias_bndhigh = 10
kp_init = 1.5
kp_bndlow = 0.8
kp_bndhigh = 2
ddfsnow_init = 0.003
ddfsnow_bndlow = 0.00175
ddfsnow_bndhigh = 0.0045
ddfsnow_iceratio = 0.5
# ===== Begin processing =====
# loop through each glacier selected
for glac in range(main_glac_rgi.shape[0]):
if debug:
print(count, main_glac_rgi.loc[main_glac_rgi.index.values[glac],'RGIId_float'])
elif glac%500 == 0:
print(count, main_glac_rgi.loc[main_glac_rgi.index.values[glac],'RGIId_float'])
# Set model parameters
modelparameters = [pygem_prms.lrgcm, pygem_prms.lrglac, kp_init, pygem_prms.precgrad, ddfsnow_init, pygem_prms.ddfice,
pygem_prms.tsnow_threshold, tbias_init]
modelparameters[5] = modelparameters[4] / ddfsnow_iceratio
# Select subsets of data
glacier_rgi_table = main_glac_rgi.loc[main_glac_rgi.index.values[glac], :]
glacier_gcm_elev = gcm_elev[glac]
glacier_gcm_prec = gcm_prec[glac,:]
glacier_gcm_temp = gcm_temp[glac,:]
glacier_gcm_tempstd = gcm_tempstd[glac,:]
glacier_gcm_lrgcm = gcm_lr[glac,:]
glacier_gcm_lrglac = glacier_gcm_lrgcm.copy()
glacier_area_initial = main_glac_hyps.iloc[glac,:].values.astype(float)
icethickness_initial = main_glac_icethickness.iloc[glac,:].values.astype(float)
width_initial = main_glac_width.iloc[glac,:].values.astype(float)
glacier_cal_data = ((cal_data.iloc[np.where(
glacier_rgi_table['rgino_str'] == cal_data['glacno'])[0],:]).copy())
glacier_str = '{0:0.5f}'.format(glacier_rgi_table['RGIId_float'])
# Select observed mass balance, error, and time data
cal_idx = glacier_cal_data.index.values[0]
# Note: index to main_glac_rgi may differ from cal_idx
t1 = glacier_cal_data.loc[cal_idx, 't1']
t2 = glacier_cal_data.loc[cal_idx, 't2']
t1_idx = int(glacier_cal_data.loc[cal_idx,'t1_idx'])
t2_idx = int(glacier_cal_data.loc[cal_idx,'t2_idx'])
# Observed mass balance [mwea]
observed_massbal = glacier_cal_data.loc[cal_idx,'mb_mwe'] / (t2 - t1)
if debug:
print('obs_mwea:', np.round(observed_massbal,2))
# Round 1: optimize precipitation factor
if debug:
print('Round 1:')
modelparameters_subset = [modelparameters[2], modelparameters[3], modelparameters[4], modelparameters[7]]
kp_bnds = (kp_bndlow, kp_bndhigh)
ddfsnow_bnds = (ddfsnow_init, ddfsnow_init)
tbias_bnds = (tbias_init, tbias_init)
modelparams, mb_mwea = run_objective(modelparameters_subset, observed_massbal,
kp_bnds=kp_bnds, tbias_bnds=tbias_bnds,
ddfsnow_bnds=ddfsnow_bnds)
kp_opt = modelparams[2]
if debug:
print('mb_mwea:', np.round(mb_mwea,2), 'kp:', np.round(kp_opt,2))
# Round 2: optimize DDFsnow
if debug:
print('Round 2:')
modelparameters_subset = [kp_opt, modelparameters[3], modelparameters[4], modelparameters[7]]
kp_bnds = (kp_opt, kp_opt)
ddfsnow_bnds = (ddfsnow_bndlow, ddfsnow_bndhigh)
tbias_bnds = (tbias_init, tbias_init)
modelparams, mb_mwea = run_objective(modelparameters_subset, observed_massbal,
kp_bnds=kp_bnds, tbias_bnds=tbias_bnds,
ddfsnow_bnds=ddfsnow_bnds)
ddfsnow_opt = modelparams[4]
if debug:
print('mb_mwea:', np.round(mb_mwea,2), 'kp:', np.round(kp_opt,2),
'ddfsnow:', np.round(ddfsnow_opt,5))
# Round 3: optimize tempbias
if debug:
print('Round 3:')
# ----- TEMPBIAS: max accumulation -----
# Lower temperature bound based on no positive temperatures
# Temperature at the lowest bin
# T_bin = T_gcm + lr_gcm * (z_ref - z_gcm) + lr_glac * (z_bin - z_ref) + tbias
lowest_bin = np.where(glacier_area_initial > 0)[0][0]
tbias_max_acc = (-1 * (glacier_gcm_temp + glacier_gcm_lrgcm *
(elev_bins[lowest_bin] - glacier_gcm_elev)).max())
tbias_bndlow = tbias_max_acc
if debug:
print('tbias_bndlow:', np.round(tbias_bndlow,2))
dif_mb_mwea = abs(observed_massbal - mb_mwea)
if debug:
print('dif:', np.round(dif_mb_mwea,2))
count = 0
while dif_mb_mwea > 0.1 and count < 20:
if count > 0:
if mb_mwea - observed_massbal > 0:
modelparameters[7] += 1
else:
modelparameters[7] -= 1
                    # Temperature bias cannot fall below the lower bound
if modelparameters[7] < tbias_bndlow:
modelparameters[7] = tbias_bndlow
modelparameters_subset = [kp_opt, modelparameters[3], ddfsnow_opt, modelparameters[7]]
kp_bnds = (kp_opt, kp_opt)
ddfsnow_bnds = (ddfsnow_opt, ddfsnow_opt)
tbias_bnds = (tbias_bndlow, tbias_bndhigh)
modelparams, mb_mwea = run_objective(modelparameters_subset, observed_massbal,
kp_bnds=kp_bnds, tbias_bnds=tbias_bnds,
ddfsnow_bnds=ddfsnow_bnds)
dif_mb_mwea = abs(observed_massbal - mb_mwea)
count += 1
if debug:
print('dif:', np.round(dif_mb_mwea,2), 'count:', count, 'tc:', np.round(modelparameters[7],2))
# Break loop if at lower bound
if abs(tbias_bndlow - modelparams[7]) < 0.1:
count=20
# Record optimal temperature bias
tbias_opt = modelparams[7]
if debug:
print('mb_mwea:', np.round(mb_mwea,2), 'kp:', np.round(kp_opt,2),
'ddfsnow:', np.round(ddfsnow_opt,5), 'tbias:', np.round(tbias_opt,2))
# EXPORT TO NETCDF
netcdf_output_fp = (pygem_prms.output_fp_cal)
if not os.path.exists(netcdf_output_fp):
os.makedirs(netcdf_output_fp)
write_netcdf_modelparams(netcdf_output_fp + glacier_str + '.nc', modelparameters, mb_mwea, observed_massbal)
# ==============================================================
#%%
# MODIFIED Huss and Hock (2015) model calibration steps
# - glacier able to evolve
    # - precipitation factor, then temperature bias (no ddfsnow)
# - ranges different
elif pygem_prms.option_calibration == 'HH2015_modified':
        if sorted(pygem_prms.params2opt) == sorted(['tbias', 'kp']):
def objective(modelparameters_subset):
"""
Objective function for mass balance data.
Parameters
----------
modelparameters_subset : np.float64
List of model parameters to calibrate
                    [precipitation factor, temperature bias]
Returns
-------
                mb_dif_mwea_abs
                    Absolute difference between modeled and observed mass balance [mwea]
"""
# Use a subset of model parameters to reduce number of constraints required
modelparameters[2] = modelparameters_subset[0]
modelparameters[7] = modelparameters_subset[1]
# Mass balance calculation
mb_mwea = mb_mwea_calc(
modelparameters, glacier_rgi_table, glacier_area_initial, icethickness_initial, width_initial,
elev_bins, glacier_gcm_temp, glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev,
glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table, t1_idx, t2_idx, t1, t2, option_areaconstant=0)
print('model params:', modelparameters[2], modelparameters[7],
'\n mb_mwea:', mb_mwea)
# Difference [mwea]
mb_dif_mwea_abs = abs(observed_massbal - mb_mwea)
return mb_dif_mwea_abs
def run_objective(modelparameters_init, observed_massbal, kp_bnds=(0.33,3), tbias_bnds=(-10,10),
run_opt=True, eps_opt=pygem_prms.eps_opt, ftol_opt=pygem_prms.ftol_opt):
"""
Run the optimization for the single glacier objective function.
Parameters
----------
modelparams_init : list
List of model parameters to calibrate
[precipitation factor, precipitation gradient, degree day factor of snow, temperature change]
glacier_cal_data : pd.DataFrame
Table containing calibration data for a single glacier
kp_bnds : tuple
Lower and upper bounds for precipitation factor (default is (0.33, 3))
                tbias_bnds : tuple
                    Lower and upper bounds for temperature bias (default is (-10, 10))
run_opt : boolean
Boolean statement allowing one to bypass the optimization and run through with initial parameters
(default is True - run the optimization)
Returns
-------
                modelparams : list
                    Full set of model parameters with the optimized values inserted
                mb_mwea : float
                    Modeled mass balance [mwea] for the optimized parameters
"""
# Bounds
modelparameters_bnds = (kp_bnds, tbias_bnds)
# Run the optimization
# 'L-BFGS-B' - much slower
# 'SLSQP' did not work for some geodetic measurements using the sum_abs_zscore. One work around was to
# divide the sum_abs_zscore by 1000, which made it work in all cases. However, methods were switched
# to 'L-BFGS-B', which may be slower, but is still effective.
# note: switch enables running through with given parameters
if run_opt:
modelparameters_opt = minimize(objective, modelparameters_init, method=pygem_prms.method_opt,
bounds=modelparameters_bnds,
options={'ftol':ftol_opt, 'eps':eps_opt})
# Record the optimized parameters
modelparameters_subset = modelparameters_opt.x
else:
modelparameters_subset = modelparameters_init.copy()
modelparams = (
[modelparameters[0], modelparameters[1], modelparameters_subset[0], modelparameters[3],
modelparameters[4], modelparameters[4] / ddfsnow_iceratio, modelparameters[6],
modelparameters_subset[1]])
# Re-run the optimized parameters in order to see the mass balance
mb_mwea = mb_mwea_calc(
                    modelparams, glacier_rgi_table, glacier_area_initial, icethickness_initial, width_initial,
elev_bins, glacier_gcm_temp, glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev,
glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table, t1_idx, t2_idx, t1, t2, option_areaconstant=0)
return modelparams, mb_mwea
else:
def objective(modelparameters_subset):
"""
Objective function for mass balance data.
Parameters
----------
modelparameters_subset : np.float64
List of model parameters to calibrate
[precipitation factor, precipitation gradient, degree-day factor of snow, temperature bias]
Returns
-------
                mb_dif_mwea_abs
                    Absolute difference between modeled and observed mass balance [mwea]
"""
# Use a subset of model parameters to reduce number of constraints required
modelparameters[2] = modelparameters_subset[0]
modelparameters[3] = modelparameters_subset[1]
modelparameters[4] = modelparameters_subset[2]
modelparameters[5] = modelparameters[4] / ddfsnow_iceratio
modelparameters[7] = modelparameters_subset[3]
# Mass balance calculation
mb_mwea = mb_mwea_calc(
modelparameters, glacier_rgi_table, glacier_area_initial, icethickness_initial, width_initial,
elev_bins, glacier_gcm_temp, glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev,
glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table, t1_idx, t2_idx, t1, t2, option_areaconstant=0)
print('model params:', modelparameters[2], modelparameters[7],
'\n mb_mwea:', mb_mwea)
# Difference [mwea]
mb_dif_mwea_abs = abs(observed_massbal - mb_mwea)
return mb_dif_mwea_abs
def run_objective(modelparameters_init, observed_massbal, kp_bnds=(0.33,3), tbias_bnds=(-10,10),
ddfsnow_bnds=(0.0026,0.0056), precgrad_bnds=(0.0001,0.0001), run_opt=True,
eps_opt=pygem_prms.eps_opt):
"""
Run the optimization for the single glacier objective function.
Parameters
----------
modelparams_init : list
List of model parameters to calibrate
[precipitation factor, precipitation gradient, degree day factor of snow, temperature change]
glacier_cal_data : pd.DataFrame
Table containing calibration data for a single glacier
kp_bnds : tuple
Lower and upper bounds for precipitation factor (default is (0.33, 3))
tbias_bnds : tuple
                    Lower and upper bounds for temperature bias (default is (-10, 10))
ddfsnow_bnds : tuple
Lower and upper bounds for degree day factor of snow (default is (0.0026, 0.0056))
precgrad_bnds : tuple
Lower and upper bounds for precipitation gradient (default is constant (0.0001,0.0001))
run_opt : boolean
Boolean statement allowing one to bypass the optimization and run through with initial parameters
(default is True - run the optimization)
Returns
-------
                modelparams : list
                    Full set of model parameters with the optimized values inserted
                mb_mwea : float
                    Modeled mass balance [mwea] for the optimized parameters
"""
# Bounds
modelparameters_bnds = (kp_bnds, precgrad_bnds, ddfsnow_bnds, tbias_bnds)
# Run the optimization
# 'L-BFGS-B' - much slower
# 'SLSQP' did not work for some geodetic measurements using the sum_abs_zscore. One work around was to
# divide the sum_abs_zscore by 1000, which made it work in all cases. However, methods were switched
# to 'L-BFGS-B', which may be slower, but is still effective.
# note: switch enables running through with given parameters
if run_opt:
modelparameters_opt = minimize(objective, modelparameters_init, method=pygem_prms.method_opt,
bounds=modelparameters_bnds,
options={'ftol':pygem_prms.ftol_opt, 'eps':eps_opt})
# Record the optimized parameters
modelparameters_subset = modelparameters_opt.x
else:
modelparameters_subset = modelparameters_init.copy()
modelparams = (
[modelparameters[0], modelparameters[1], modelparameters_subset[0], modelparameters_subset[1],
modelparameters_subset[2], modelparameters_subset[2] / ddfsnow_iceratio, modelparameters[6],
modelparameters_subset[3]])
# Re-run the optimized parameters in order to see the mass balance
mb_mwea = mb_mwea_calc(
                    modelparams, glacier_rgi_table, glacier_area_initial, icethickness_initial, width_initial,
elev_bins, glacier_gcm_temp, glacier_gcm_tempstd, glacier_gcm_prec, glacier_gcm_elev,
glacier_gcm_lrgcm, glacier_gcm_lrglac, dates_table, t1_idx, t2_idx, t1, t2, option_areaconstant=0)
return modelparams, mb_mwea
def write_netcdf_modelparams(output_fullfn, modelparameters, mb_mwea, observed_massbal):
"""
Export glacier model parameters and modeled observations to netcdf file.
Parameters
----------
output_fullfn : str
Full filename (path included) of the netcdf to be exported
modelparams : list
model parameters
mb_mwea : float
modeled mass balance for given parameters
Returns
-------
None
Exports file to netcdf
"""
# Select data from model to be stored in netcdf
df = | pd.DataFrame(index=[0]) | pandas.DataFrame |
import pandas as pd
from scoreware.race.utils import get_last_name
def parse_general(df, headers, id):
newdf= | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2022/5/10 15:19
describe: performance evaluation of trade pairs and trader strategies
"""
import glob
import traceback
import pandas as pd
from tqdm import tqdm
from czsc.traders.advanced import CzscAdvancedTrader
from czsc.utils import dill_load
from czsc.objects import cal_break_even_point
class PairsPerformance:
"""交易对效果评估"""
def __init__(self, df_pairs: pd.DataFrame, ):
"""
        :param df_pairs: all trade pairs; sample data shown below
标的代码 交易方向 最大仓位 开仓时间 累计开仓 平仓时间 \
0 000001.SH 多头 1 2020-02-06 09:45:00 2820.014893 2020-02-10 13:15:00
1 000001.SH 多头 1 2020-03-20 14:15:00 2733.164062 2020-03-27 14:15:00
2 000001.SH 多头 1 2020-03-30 13:30:00 2747.813965 2020-03-31 13:15:00
3 000001.SH 多头 1 2020-04-01 10:45:00 2765.350098 2020-04-02 09:45:00
4 000001.SH 多头 1 2020-04-02 14:15:00 2757.827881 2020-04-09 11:15:00
累计平仓 累计换手 持仓K线数 事件序列 持仓天数 盈亏金额 交易盈亏 \
0 2872.166992 2 40 开多@低吸 > 平多@60分钟顶背驰 4.145833 52.152100 0.0184
1 2786.754883 2 80 开多@低吸 > 平多@60分钟顶背驰 7.000000 53.590820 0.0196
2 2752.198975 2 15 开多@低吸 > 平多@持有资金 0.989583 4.385010 0.0015
3 2721.335938 2 12 开多@低吸 > 平多@持有资金 0.958333 -44.014160 -0.0159
4 2821.693115 2 58 开多@低吸 > 平多@60分钟顶背驰 6.875000 63.865234 0.0231
盈亏比例
0 0.0184
1 0.0196
2 0.0015
3 -0.0159
4 0.0231
"""
time_convert = lambda x: (x.strftime("%Y年"), x.strftime("%Y年%m月"), x.strftime("%Y-%m-%d"),
f"{x.year}年第{x.weekofyear}周" if x.weekofyear >= 10 else f"{x.year}年第0{x.weekofyear}周",
)
df_pairs[['开仓年', '开仓月', '开仓日', '开仓周']] = list(df_pairs['开仓时间'].apply(time_convert))
df_pairs[['平仓年', '平仓月', '平仓日', '平仓周']] = list(df_pairs['平仓时间'].apply(time_convert))
self.df_pairs = df_pairs
        # Columns that can be used to aggregate the trade-pair analysis
self.agg_columns = ['标的代码', '交易方向',
'平仓年', '平仓月', '平仓周', '平仓日',
'开仓年', '开仓月', '开仓日', '开仓周']
@staticmethod
def get_pairs_statistics(df_pairs: pd.DataFrame):
"""统计一组交易的基本信息
:param df_pairs:
:return:
"""
if len(df_pairs) == 0:
info = {
"开始时间": None,
"结束时间": None,
"交易标的数量": 0,
"总体交易次数": 0,
"平均持仓天数": 0,
"平均单笔收益": 0,
"单笔收益标准差": 0,
"最大单笔收益": 0,
"最小单笔收益": 0,
"交易胜率": 0,
"累计盈亏比": 0,
"交易得分": 0,
"每自然日收益": 0,
"盈亏平衡点": 0,
}
return info
win_pct = round(len(df_pairs[df_pairs['盈亏比例'] > 0]) / len(df_pairs), 4)
df_gain = df_pairs[df_pairs['盈亏比例'] > 0]
df_loss = df_pairs[df_pairs['盈亏比例'] <= 0]
gain = df_gain['盈亏比例'].sum()
loss = abs(df_loss['盈亏比例'].sum())
        # cap the maximum effective value of the cumulative gain/loss ratio
gain_loss_rate = min(round(gain / (loss + 0.000001), 2), 5)
info = {
"开始时间": df_pairs['开仓时间'].min(),
"结束时间": df_pairs['平仓时间'].max(),
"交易标的数量": df_pairs['标的代码'].nunique(),
"总体交易次数": len(df_pairs),
"平均持仓天数": round(df_pairs['持仓天数'].mean(), 2),
"平均单笔收益": round(df_pairs['盈亏比例'].mean() * 10000, 2),
"单笔收益标准差": round(df_pairs['盈亏比例'].std() * 10000, 2),
"最大单笔收益": round(df_pairs['盈亏比例'].max() * 10000, 2),
"最小单笔收益": round(df_pairs['盈亏比例'].min() * 10000, 2),
"交易胜率": win_pct,
"累计盈亏比": gain_loss_rate,
"交易得分": round(gain_loss_rate * win_pct, 4),
"盈亏平衡点": round(cal_break_even_point(df_pairs['盈亏比例'].to_list()), 4),
}
info['每自然日收益'] = round(info['平均单笔收益'] / info['平均持仓天数'], 2)
return info
def agg_statistics(self, col: str):
"""按列聚合进行交易对评价"""
df_pairs = self.df_pairs.copy()
        assert col in self.agg_columns, f"{col} is not a column that supports aggregation; see: {self.agg_columns}"
results = []
for name, dfg in df_pairs.groupby(col):
if dfg.empty:
continue
res = {col: name}
res.update(self.get_pairs_statistics(dfg))
results.append(res)
df = pd.DataFrame(results)
return df
@property
def basic_info(self):
"""写入基础信息"""
df_pairs = self.df_pairs.copy()
return self.get_pairs_statistics(df_pairs)
def agg_to_excel(self, file_xlsx):
"""遍历聚合列,保存结果到 Excel 文件中"""
f = pd.ExcelWriter(file_xlsx)
for col in ['标的代码', '交易方向', '平仓年', '平仓月', '平仓周', '平仓日']:
df_ = self.agg_statistics(col)
df_.to_excel(f, sheet_name=f"{col}聚合", index=False)
f.close()
print(f"聚合分析结果文件:{file_xlsx}")
class TradersPerformance:
"""Trader Strategy 的效果评估"""
def __init__(self, traders_pat):
self.file_traders = glob.glob(traders_pat)
def get_pairs(self, sdt, edt):
"""获取一段时间内的所有交易对
:param sdt: 开始时间
:param edt: 结束时间
:return:
"""
sdt = pd.to_datetime(sdt)
edt = pd.to_datetime(edt)
_results = []
for file in tqdm(self.file_traders, desc=f"get_pairs | {sdt} | {edt}"):
try:
trader: CzscAdvancedTrader = dill_load(file)
_pairs = [x for x in trader.long_pos.pairs if edt >= x['平仓时间'] > x['开仓时间'] >= sdt]
_results.extend(_pairs)
except:
print(file)
traceback.print_exc()
df = pd.DataFrame(_results)
return df
def get_holds(self, sdt, edt):
"""获取一段时间内的所有持仓信号
:param sdt: 开始时间
:param edt: 结束时间
:return: 返回数据样例如下
dt symbol long_pos n1b
0 2020-01-02 09:45:00 000001.SH 0 0.004154
1 2020-01-02 10:00:00 000001.SH 0 0.001472
2 2020-01-02 10:15:00 000001.SH 0 0.001291
3 2020-01-02 10:30:00 000001.SH 0 0.001558
4 2020-01-02 10:45:00 000001.SH 0 -0.001355
"""
sdt = pd.to_datetime(sdt)
edt = pd.to_datetime(edt)
_results = []
for file in tqdm(self.file_traders, desc=f"get_holds | {sdt} | {edt}"):
try:
trader: CzscAdvancedTrader = dill_load(file)
_lh = [x for x in trader.long_holds if edt >= x['dt'] >= sdt]
_results.extend(_lh)
except:
print(file)
traceback.print_exc()
        df = pd.DataFrame(_results)
        return df
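# Minimal usage sketch (not from the original module): the pairs file path and
# column layout are assumptions; PairsPerformance only needs a DataFrame shaped
# like the sample documented in its __init__ docstring.
if __name__ == '__main__':
    df_pairs_demo = pd.read_excel('pairs_demo.xlsx')  # hypothetical input file
    pp = PairsPerformance(df_pairs_demo)
    print(pp.basic_info)
    pp.agg_to_excel('pairs_demo_agg.xlsx')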
from os import link
import flask
from flask.globals import request
from flask import Flask, render_template
# library used for prediction
import numpy as np
import pandas as pd
import pickle
# library used for insights
import json
import plotly
import plotly.express as px
app = Flask(__name__, template_folder = 'templates')
link_active = None
# render home template
@app.route('/')
def main():
return(render_template('home.html', title = 'Home'))
# load pickle file
model = pickle.load(open('model/rf_classifier.pkl', 'rb'))
scaler = pickle.load(open('model/scaler.pkl', 'rb'))
@app.route('/form')
def form():
show_prediction = False
link_active = 'Form'
return(render_template('form.html', title = 'Form', show_prediction = show_prediction, link_active = link_active))
@app.route('/insights')
def insights():
link_active = 'Insights'
    df = pd.read_csv('online_shoppers_intention.csv')
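    # Hedged sketch of the rest of this view (the original is truncated here):
    # build a Plotly figure from the data and hand it to the template as JSON.
    # The 'Revenue' column ships with the online-shoppers dataset; the
    # 'insights.html' template name and 'graph_json' variable are assumptions.
    fig = px.histogram(df, x='Revenue')
    graph_json = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
    return render_template('insights.html', title='Insights',
                           graph_json=graph_json, link_active=link_active)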
from __future__ import print_function
# from builtins import str
# from builtins import object
import pandas as pd
from openpyxl import load_workbook
import numpy as np
import os
from .data_utils import make_dir
class XlsxRecorder(object):
"""
xlsx recorder for results
    including two recorders: one for the current experiment, which records details of the results at each iteration;
    the other records the summary of different experiments, which is saved at summary_path
1. detailed results: saved in fig_save_path/results.xlsx
    ** Sheet1: #total_filename x #metrics; along the row direction are the records by iteration
    ** batch_0: #batch_filename x #metric_by_label; along the column direction are the records by iteration
** batch_1: same as batch_0
** ......
2. task results: saved in ../data/summary.xlsx
** Sheet1: task_name * #metrics recorded by iteration
"""
def __init__(self, expr_name, saving_path='', folder_name=''):
self.expr_name = expr_name
if not len(saving_path):
self.saving_path = '../data/'+expr_name #saving_path
else:
self.saving_path = saving_path
self.saving_path = os.path.abspath(self.saving_path)
self.folder_name = folder_name
if len(folder_name):
self.saving_path = os.path.join(self.saving_path, folder_name)
"""path of saving excel, default is the same as the path of saving figures"""
self.writer_path = None
self.xlsx_writer = None
self.summary_path = '../data/summary.xlsx'
"""the path for summary, which can record results from different experiments"""
self.measures = ['iou', 'precision', 'recall', 'dice']
"""measures to record"""
self.batch_count = {}
self.row_space = 50
self.column_space = 10
self.start_row = 0
self.summary = None
self.avg_buffer = {}
self.iter_info_buffer = []
self.name_list_buffer = []
self.init_summary()
print("the update space in detailed files is {}".format(self.row_space))
def init_summary(self):
""" init two recorders, initilzation would create a new recorder for this experiment, recording all details
at the same time it would load the data from summary recorder, then it would append the new experiment summary to summary recorder
"""
if not os.path.exists(self.saving_path ):
os.makedirs(self.saving_path )
self.writer_path = os.path.join(self.saving_path, 'results.xlsx')
writer = pd.ExcelWriter(self.writer_path, engine='xlsxwriter')
df = pd.DataFrame([])
df.to_excel(writer)
worksheet = writer.sheets['Sheet1']
worksheet.set_column(1, 1000, 30)
writer.save()
writer.close()
self.writer_book = load_workbook(self.writer_path)
self.xlsx_writer = pd.ExcelWriter(self.writer_path, engine='openpyxl')
self.xlsx_writer.book = self.writer_book
self.xlsx_writer.sheets = dict((ws.title, ws) for ws in self.writer_book.worksheets)
if not os.path.exists(self.summary_path):
writer = pd.ExcelWriter(self.summary_path, engine = 'xlsxwriter')
df = pd.DataFrame([])
df.to_excel(writer)
worksheet = writer.sheets['Sheet1']
worksheet.set_column(1, 1000, 30)
writer.save()
writer.close()
def set_batch_based_env(self,name_list,batch_id):
# need to be set before each saving operation
self.name_list = name_list
self.sheet_name = 'batch_'+ str(batch_id)
if self.sheet_name not in self.batch_count:
self.batch_count[self.sheet_name] = -1
self.name_list_buffer += self.name_list
self.batch_count[self.sheet_name] += 1
count = self.batch_count[self.sheet_name]
self.start_row = count * self.row_space
self.start_column = 0
def set_summary_based_env(self):
self.sheet_name = 'Sheet1'
self.start_row = 0
def put_into_avg_buff(self, result, iter_info):
"""
# avg_buffer is to save avg_results from each iter, from each batch
# iter_info: string contains iter info
# the buffer is organized as { iter_info1: results_list_iter1, iter_info2:results_list_iter2}
# results_list_iter1 : [batch1_res_iter1, batch2_res_iter1]
# batch1_res_iter1:{metric1: result, metric2: result}
"""
if iter_info not in self.avg_buffer:
self.avg_buffer[iter_info] = []
self.iter_info_buffer += [iter_info]
self.avg_buffer[iter_info] += [result]
def merge_from_avg_buff(self):
"""
# iter_info: string contains iter info
# the buffer is organized as { iter_info1: results_list_iter1, iter_info2:results_list_iter2}
# results_list_iter1 : [batch1_res_iter1, batch2_res_iter1]
# batch1_res_iter1:{metric1: result, metric2: result}
# return: dic: {iter_info1:{ metric1: nFile x 1 , metric2:...}, iter_info2:....}
"""
metric_avg_dic={}
for iter_info,avg_list in list(self.avg_buffer.items()):
metric_results_tmp = {metric: [result[metric] for result in avg_list] for metric in
self.measures}
metric_avg_dic[iter_info] = {metric: np.concatenate(metric_results_tmp[metric], 0) for metric in metric_results_tmp}
return metric_avg_dic
def saving_results(self,sched, results=None, info=None, averaged_results=None):
"""
the input results should be different for each sched
batch: the input result should be dic , each measure inside should be B x N_label
buffer: the input result should be dic, each measure inside should be N_img x 1
summary: no input needed, the summary could be got from the buffer
:param results:
:param sched:
:param info:
:return:
"""
if sched == 'batch':
label_info = info['label_info']
iter_info = info['iter_info']
self.saving_all_details(results,averaged_results,label_info,iter_info)
elif sched == 'buffer':
iter_info = info['iter_info']
self.put_into_avg_buff(results,iter_info)
elif sched == 'summary':
self.summary_book = load_workbook(self.summary_path)
self.summary_writer = pd.ExcelWriter(self.summary_path,engine='openpyxl')
self.set_summary_based_env()
metric_avg_dic = self.merge_from_avg_buff()
self.saving_label_averaged_results(metric_avg_dic)
self.saving_summary(metric_avg_dic)
self.save_figs_for_batch(metric_avg_dic)
self.xlsx_writer.close()
self.summary_writer.close()
else:
raise ValueError("saving method not implemented")
def saving_label_averaged_results(self, results):
"""
# saved by iteration
# results: dic: {iter_info1:{ metric1: nFile x 1 , metric2:...}, iter_info2:....}
# saving the n_File*nAvgMetrics into xlsx_writer
# including the iter_info
"""
start_column = 0
results_summary = {iter_info: {metric:np.mean(results[iter_info][metric]).reshape(1,1) for metric in self.measures} for iter_info in self.iter_info_buffer}
for iter_info in self.iter_info_buffer:
iter_expand = {metric: np.squeeze(np.concatenate((results[iter_info][metric], results_summary[iter_info][metric]), 0)) for metric in self.measures}
df = pd.DataFrame.from_dict(iter_expand)
df = df[self.measures]
try:
df.index = pd.Index(self.name_list_buffer+['average'])
except:
print("DEBUGGING !!, the iter_expand is {},\n self.name_list_buffer is {},\n results summary is {} \n results{}\n".format(iter_expand,self.name_list_buffer, results_summary,results))
df.to_excel(self.xlsx_writer, sheet_name=self.sheet_name, startcol=start_column, index_label=iter_info)
start_column += self.column_space
self.xlsx_writer.save()
def saving_summary(self, results):
"""
# saved by iteration
# saving the 1*nAvgMetrics into summary_book_path
# including the task name and iter_info
"""
self.summary_writer.book = self.summary_book
self.summary_writer.sheets = dict((ws.title, ws) for ws in self.summary_book.worksheets)
col_name_list = [metric+iter_info for metric in self.measures for iter_info in self.iter_info_buffer]
results_summary = {metric+iter_info: np.mean(results[iter_info][metric]).reshape(1) for metric in self.measures for iter_info in self.iter_info_buffer}
df = pd.DataFrame.from_dict(results_summary)
df = df[col_name_list]
        df.index = pd.Index([self.expr_name+self.folder_name])
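        # Hedged completion (assumed; the original method is truncated here):
        # append this experiment's summary row below any existing rows of Sheet1
        # and save the workbook. The startrow choice is an assumption.
        startrow = self.summary_writer.sheets['Sheet1'].max_row
        df.to_excel(self.summary_writer, sheet_name='Sheet1', startrow=startrow)
        self.summary_writer.save()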
import pyodbc
import pandas as pd
from patientKG import *
import holoviews as hv
from holoviews import opts
from bokeh.plotting import show
import panel as pn
import networkx as nx
from ..config.bedrock_connection import *
from ..priorKnowledge import labturnaround
from patientKG import utils_pickle
from PU.pu_events import *
hv.extension('bokeh')
defaults = dict(width=1000, height=1000, padding=0.1)
def test_hrg_example():
Bedrock_Conn = Bedrock()
#DF = pd.read_sql_query('Select * from dbo.vw_STRANDED_ORDERS_IN_ALL_PIVOT_FILTED where activity_identifier = \'4532029\'',Bedrock_Conn)
DF = pd.read_csv('patientKG/tests/MockPatient.csv')
Bedrock_Conn.close()
Event_list = [
'RED BLOOD CELL COUNT'
,'MEAN CELL HAEMOGLOBIN'
,'HAEMOGLOBIN'
,'HAEMATOCRIT'
,'PLATELET COUNT'
,'MEAN CELL VOLUME'
,'MEAN CELL HAEMOGLOBIN CONC'
,'WHITE BLOOD CELL COUNT'
,'MONOCYTE COUNT'
,'NEUTROPHIL COUNT'
,'LYMPHOCYTE COUNT'
,'EOSINOPHIL COUNT'
,'BASOPHIL COUNT'
,'SODIUM'
,'UREA LEVEL'
,'CREATININE'
,'POTASSIUM'
,'C-REACTIVE PROTEIN']
Event_list_Dic = {
'RED BLOOD CELL COUNT':'numeric'
,'MEAN CELL HAEMOGLOBIN':'numeric'
,'HAEMOGLOBIN':'numeric'
,'HAEMATOCRIT':'numeric'
,'PLATELET COUNT':'numeric'
,'MEAN CELL VOLUME':'numeric'
,'MEAN CELL HAEMOGLOBIN CONC':'numeric'
,'WHITE BLOOD CELL COUNT':'numeric'
,'MONOCYTE COUNT':'numeric'
,'NEUTROPHIL COUNT':'numeric'
,'LYMPHOCYTE COUNT':'numeric'
,'EOSINOPHIL COUNT':'numeric'
,'BASOPHIL COUNT':'numeric'
,'SODIUM':'numeric'
,'UREA LEVEL':'numeric'
,'CREATININE':'numeric'
,'POTASSIUM':'numeric'
,'C-REACTIVE PROTEIN':'numeric'}
Columns_Data_Spec_Dic = {
'unique_id':'ACTIVITY_IDENTIFIER'#Used as unique identifier
,'LengthofStay':'TOTAL_LOS'
,'Age':'HPS_AGE_AT_ADMISSION_DATE' #Used as Global Attributes
,'EventCatalog':'ORDER_CATALOG_DESCRIPTION'
,'EventStartDT':'ORDER_DATE_TIME'
,'EventEndDT':'ORDER_RESULT_PERFORMED_DATE_TIME'
,'SpellStartDT':'HPS_START_DATE_TIME_HOSPITAL_PROVIDER_SPELL'
,'SpellEndDT':'HPS_DISCHARGE_DATE_TIME_HOSPITAL_PROVIDER_SPELL'
}
Columns_Data_Spec = [
['cd_mapping','unique_id','ACTIVITY_IDENTIFIER']
,['cd_mapping','LengthofStay','TOTAL_LOS']
,['cd_mapping','Age','HPS_AGE_AT_ADMISSION_DATE']
,['cd_mapping','EventCatalog','ORDER_CATALOG_DESCRIPTION']
,['cd_mapping','EventStartDT','ORDER_DATE_TIME']
,['cd_mapping','EventEndDT','ORDER_RESULT_PERFORMED_DATE_TIME']
,['cd_mapping','SpellStartDT','HPS_START_DATE_TIME_HOSPITAL_PROVIDER_SPELL']
,['cd_mapping','SpellEndDT','HPS_DISCHARGE_DATE_TIME_HOSPITAL_PROVIDER_SPELL']
,['event_date_type','RED BLOOD CELL COUNT','numeric']
,['event_date_type','MEAN CELL HAEMOGLOBIN','numeric']
,['event_date_type','HAEMOGLOBIN','numeric']
,['event_date_type','HAEMATOCRIT','numeric']
,['event_date_type','PLATELET COUNT','numeric']
,['event_date_type','MEAN CELL VOLUME','numeric']
,['event_date_type','MEAN CELL HAEMOGLOBIN CONC','numeric']
,['event_date_type','WHITE BLOOD CELL COUNT','numeric']
,['event_date_type','MONOCYTE COUNT','numeric']
,['event_date_type','NEUTROPHIL COUNT','numeric']
,['event_date_type','LYMPHOCYTE COUNT','numeric']
,['event_date_type','EOSINOPHIL COUNT','numeric']
,['event_date_type','BASOPHIL COUNT','numeric']
,['event_date_type','SODIUM','numeric']
,['event_date_type','UREA LEVEL','numeric']
,['event_date_type','CREATININE','numeric']
,['event_date_type','POTASSIUM','numeric']
,['event_date_type','C-REACTIVE PROTEIN','numeric']
]
df = pd.DataFrame(Columns_Data_Spec)
df.columns = ['Function','key','value']
print(df[df['Function']=='event_date_type'])
#reference=DF.groupby('ORDER_CATALOG_DESCRIPTION')['TURNAROUND'].describe()
reference = labturnaround.LabTurnAround().get_reference_from_db()
item = 123456
test1=DF[DF['ACTIVITY_IDENTIFIER']==item]
test = graphs_base.PatientKG(DF[DF['ACTIVITY_IDENTIFIER']==item],Event_list,['TOTAL_LOS','HPS_AGE_AT_ADMISSION_DATE'],reference,Columns_Data_Spec_Dic)
test.add_nodes_by_period(period=None)
test.add_full_edges()
test.add_linked_edges()
graph1 = test.graph
hv.opts.defaults(
opts.EdgePaths(**defaults), opts.Graph(**defaults), opts.Nodes(**defaults))
simple_graph=hv.Graph.from_networkx(graph1, test.timeline_layout(dict((int(v),k) for k,v in test.node_dic.items())))
#print(graph1.degree)
#bokeh_server = pn.Row(simple_graph).show(port=12345)
#bokeh_server.stop()
#show(simple_graph)
return test
def test_pu_example_wardstay(item='4194205'):
Red004_Conn = Red004()
    DF = pd.read_sql_query('SELECT * FROM [AdvancedAnalytics].[dbo].[Patient_Episode_Ward_Stay] where ACTIVITY_IDENTIFIER = '+ item +' order by ACTIVITY_IDENTIFIER, CE_EPISODE_NUMBER, WARD_STAY_ORDER',Red004_Conn)
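    # Hedged continuation (assumed; the original function is truncated here):
    # close the connection and return the ward-stay rows, mirroring the
    # Bedrock connection handling in test_hrg_example above.
    Red004_Conn.close()
    return DF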
import torch
import numpy as np
from torch.utils import data
import pandas as pd
from sklearn.model_selection import train_test_split, KFold
from time import time
class Dataset:
def tag2tok(self, tags):
if pd.isnull(tags):
return np.nan
tok_tags = [self.tag_vocab["<s>"]]
for t in tags.split(" "):
tok_tags.append(self.tag_vocab[t])
tok_tags.append(self.tag_vocab["</s>"])
return tok_tags
def str2tok(self, s):
sentence = [self.vocab["<s>"]]
words = [[self.char_vocab["<s>"]]]
for w in s.split(" "):
chars = []
for c in w:
if c in self.char_vocab:
chars.append(self.char_vocab[c])
else:
chars.append(self.char_vocab["<unk>"])
words.append(chars)
if w in self.vocab:
sentence.append(self.vocab[w])
else:
sentence.append(self.vocab["<unk>"])
sentence.append(self.vocab["</s>"])
words.append([self.char_vocab["</s>"]])
return sentence, words
def fit(self, df):
tok_tweet, tok_chars, slen, wlen, tok_tags = [], [], [], [], []
for w, t in zip(df["clean_tweet"], df["entities"]):
tk, tc = self.str2tok(w)
tt = self.tag2tok(t)
tok_tweet.append(tk)
tok_chars.append(tc)
tok_tags.append(tt)
slen.append(len(tk))
wlen.append(max([len(w) for w in tc]))
df["tok_tweet"] = tok_tweet
df["tok_chars"] = tok_chars
df["tok_tags"] = tok_tags
df["slen"], df["wlen"] = slen, wlen
return df
def build_vocabs(self, set):
# Initialize vocabulary
self.vocab = {"<p>": 0, "<unk>": 1, "<s>": 2, "</s>": 3}
self.char_vocab = {"<p>": 0, "<unk>": 1, "<s>": 2, "</s>": 3}
self.tag_vocab = {"<p>": 0, "<s>": 1, "</s>": 2}
# fill a dict with word : count
w_count = dict()
for n, line in enumerate(set["clean_tweet"]):
for w in line.split(" "):
w_count[w] = 1 if w not in w_count else w_count[w]+1
# add words to the vocab if they are above threshold
for w, c in w_count.items():
if (c >= self.threshold):
self.vocab[w] = len(self.vocab)
for char in w:
if not char in self.char_vocab:
self.char_vocab[char] = len(self.char_vocab)
del w_count
# tags
for s in set[set["relevant"] == 1]["entities"]:
for w in s.split():
if not w in self.tag_vocab:
self.tag_vocab[w] = len(self.tag_vocab)
# Save a inverted vocab
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.inv_tags = {v: k for k, v in self.tag_vocab.items()}
def __init__(self, batch_size=256, threshold=100, tokenizer=None):
# Variables
# threshold = min occurences to keep a word in the vocab
self.threshold = threshold
self.batch_size = batch_size
self.tokenizer = tokenizer
# Load Dataset
dset = pd.read_csv("./datasets/all_data.csv", sep="\t")
dset["timestamp"] = pd.to_datetime(
dset["timestamp"]).dt.tz_convert(None)
# Timestamps
        train = pd.Timestamp(2016, 11, 21)
#%%
path = '../../dataAndModel/data/o2o/'
import os, sys, pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import date
from sklearn.linear_model import SGDClassifier, LogisticRegression
dfoff = pd.read_csv(path+'ccf_offline_stage1_train.csv')
dftest = pd.read_csv(path+'ccf_offline_stage1_test_revised.csv')
dfon = pd.read_csv(path+'ccf_online_stage1_train.csv')
print('data read end.')
print(dfoff['Discount_rate'])
#%%
dfoff['distance'] = dfoff['Distance'].fillna(-1).astype(int)
print(dfoff['Distance'])
print(dfoff['distance'])
#%%
print(dfoff['distance'][13])
print(dfoff['Distance'][13])
#%%
# %%
# 1. Convert "spend xx, get yy off" coupons (`xx:yy`) into a discount rate `1 - yy/xx`, and build the coupon-related features `discount_rate, discount_man, discount_jian, discount_type`
# 2. Convert the distance column from `str` to `int`
# convert Discount_rate and Distance
def getDiscountType(row):
if pd.isnull(row):
return np.nan
elif ':' in row:
return 1
else:
return 0
def convertRate(row):
"""Convert discount to rate"""
if pd.isnull(row):
return 1.0
elif ':' in str(row):
rows = row.split(':')
return 1.0 - float(rows[1])/float(rows[0])
else:
return float(row)
def getDiscountMan(row):
if ':' in str(row):
rows = row.split(':')
return int(rows[0])
else:
return 0
def getDiscountJian(row):
if ':' in str(row):
rows = row.split(':')
return int(rows[1])
else:
return 0
#%%
def processData(df):
# convert discunt_rate
df['discount_rate'] = df['Discount_rate'].apply(convertRate)
df['discount_man'] = df['Discount_rate'].apply(getDiscountMan)
df['discount_jian'] = df['Discount_rate'].apply(getDiscountJian)
df['discount_type'] = df['Discount_rate'].apply(getDiscountType)
print(df['discount_rate'].unique())
# convert distance
df['distance'] = df['Distance'].fillna(-1).astype(int)
return df
dfoff = processData(dfoff)
# dftest = processData(dftest)
#%%
date_received = dfoff['Date_received'].unique()
date_received = sorted(date_received[pd.notnull(date_received)])
date_buy = dfoff['Date'].unique()
date_buy = sorted(date_buy[pd.notnull(date_buy)])
date_buy = sorted(dfoff[dfoff['Date'].notnull()]['Date'])
couponbydate = dfoff[dfoff['Date_received'].notnull()][['Date_received', 'Date']].groupby(['Date_received'], as_index=False).count()
couponbydate.columns = ['Date_received','count']
buybydate = dfoff[(dfoff['Date'].notnull()) & (dfoff['Date_received'].notnull())][['Date_received', 'Date']].groupby(['Date_received'], as_index=False).count()
buybydate.columns = ['Date_received','count']
print("end")
#%%
def getWeekday(row):
if row == 'nan':
return np.nan
else:
return date(int(row[0:4]), int(row[4:6]), int(row[6:8])).weekday() + 1
dfoff['weekday'] = dfoff['Date_received'].astype(str).apply(getWeekday)
dftest['weekday'] = dftest['Date_received'].astype(str).apply(getWeekday)
# weekday_type : 1 for Saturday and Sunday, 0 otherwise
dfoff['weekday_type'] = dfoff['weekday'].apply(lambda x : 1 if x in [6,7] else 0 )
dftest['weekday_type'] = dftest['weekday'].apply(lambda x : 1 if x in [6,7] else 0 )
# change weekday to one-hot encoding
weekdaycols = ['weekday_' + str(i) for i in range(1,8)]
tmpdf = pd.get_dummies(dfoff['weekday'].replace('nan', np.nan))
print(tmpdf)
tmpdf.columns = weekdaycols
dfoff[weekdaycols] = tmpdf
print(dfoff)
tmpdf = pd.get_dummies(dftest['weekday'].replace('nan', np.nan))
tmpdf.columns = weekdaycols
dftest[weekdaycols] = tmpdf
def label(row):
if pd.isnull(row['Date_received']):
return -1
if pd.notnull(row['Date']):
td = pd.to_datetime(row['Date'], format='%Y%m%d') - pd.to_datetime(row['Date_received'], format='%Y%m%d')
if td <= | pd.Timedelta(15, 'D') | pandas.Timedelta |
# Importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# lightgbm for classification
from numpy import mean
from numpy import std
#from sklearn.datasets import make_classification
from lightgbm import LGBMClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
#from matplotlib import pyplot
path = '../Data'
train = pd.read_csv(path + "/train.csv")
test = pd.read_csv(path + "/test.csv")
# submission = pd.read_csv(path + "/sample_submission.csv")
print(train.head())
"""### Filling the null values in Number_Weeks_Used column"""
train['Number_Weeks_Used'] = train['Number_Weeks_Used'].fillna(
train.groupby('Pesticide_Use_Category')['Number_Weeks_Used'].transform('median'))
test['Number_Weeks_Used'] = test['Number_Weeks_Used'].fillna(
test.groupby('Pesticide_Use_Category')['Number_Weeks_Used'].transform('median'))
"""### Data Preprocessing"""
training_labels = train.iloc[:, -1]
X_train = train.iloc[:, 1:-1]
X_test = test.iloc[:, 1:]
data = pd.concat([X_train, X_test])
# data.head()
columns_names_encod = data.columns[[3, 7]]
data = pd.get_dummies(data, columns=columns_names_encod)
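# Hedged continuation (assumed; the script stops after encoding): split the
# combined frame back into train/test and evaluate an LGBMClassifier with
# repeated stratified k-fold CV, which the otherwise unused imports above suggest.
X_train_enc = data.iloc[:len(X_train)]
X_test_enc = data.iloc[len(X_train):]
model = LGBMClassifier()
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=1)
scores = cross_val_score(model, X_train_enc, training_labels,
                         scoring='accuracy', cv=cv, n_jobs=-1)
print('Accuracy: %.3f (%.3f)' % (mean(scores), std(scores)))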
"""Module is for data (time series and anomaly list) processing.
"""
from typing import Dict, List, Optional, Tuple, Union, overload
import numpy as np
import pandas as pd
def validate_series(
ts: Union[pd.Series, pd.DataFrame],
check_freq: bool = True,
check_categorical: bool = False,
) -> Union[pd.Series, pd.DataFrame]:
"""Validate time series.
This functoin will check some common critical issues of time series that
may cause problems if anomaly detection is performed without fixing them.
The function will automatically fix some of them and raise errors for the
others.
Issues will be checked and automatically fixed include:
- Time index is not monotonically increasing;
- Time index contains duplicated time stamps (fix by keeping first values);
- (optional) Time index attribute `freq` is missed while the index follows
a frequency;
- (optional) Time series include categorical (non-binary) label columns
(to fix by converting categorical labels into binary indicators).
Issues will be checked and raise error include:
- Wrong type of time series object (must be pandas Series or DataFrame);
- Wrong type of time index object (must be pandas DatetimeIndex).
Parameters
----------
ts: pandas Series or DataFrame
Time series to be validated.
check_freq: bool, optional
Whether to check time index attribute `freq` is missed. Default: True.
check_categorical: bool, optional
Whether to check time series include categorical (non-binary) label
columns. Default: False.
Returns
-------
pandas Series or DataFrame
Validated time series.
"""
ts = ts.copy()
# check input type
if not isinstance(ts, (pd.Series, pd.DataFrame)):
raise TypeError("Input is not a pandas Series or DataFrame object")
# check index type
if not isinstance(ts.index, pd.DatetimeIndex):
raise TypeError(
"Index of time series must be a pandas DatetimeIndex object."
)
# check duplicated
if any(ts.index.duplicated(keep="first")):
ts = ts[ts.index.duplicated(keep="first") == False]
# check sorted
if not ts.index.is_monotonic_increasing:
ts.sort_index(inplace=True)
# check time step frequency
if check_freq:
if (ts.index.freq is None) and (ts.index.inferred_freq is not None):
ts = ts.asfreq(ts.index.inferred_freq)
# convert categorical labels into binary indicators
if check_categorical:
if isinstance(ts, pd.DataFrame):
            ts = pd.get_dummies(ts)
    return ts
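# Minimal usage sketch (not part of the original module): a series with an
# unsorted, duplicated DatetimeIndex gets de-duplicated and sorted as documented.
if __name__ == '__main__':
    s = pd.Series(
        [1.0, 2.0, 3.0],
        index=pd.to_datetime(['2020-01-02', '2020-01-01', '2020-01-01']),
    )
    print(validate_series(s))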
import os
import pickle
import numpy as np
import pandas as pd
def aggregate_meta_info(exp_dir):
files = [os.path.join(exp_dir, f) for f in os.listdir(exp_dir) if 'meta_info' in f]
    df = pd.DataFrame(columns=['pid', 'class_target', 'spacing'])
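    # Hedged completion (assumed; the original function is truncated here): load
    # each pickled meta-info record as a row and persist the aggregated frame.
    # The record layout (pid, class_target, spacing) and output name are assumptions.
    for f in files:
        with open(f, 'rb') as handle:
            df.loc[len(df)] = pickle.load(handle)
    df.to_pickle(os.path.join(exp_dir, 'info_df.pickle'))
    print("aggregated meta info to df with length", len(df))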
import os
if not os.path.exists("temp"):
os.mkdir("temp")
def add_pi_obj_func_test():
import os
import pyemu
pst = os.path.join("utils","dewater_pest.pst")
pst = pyemu.optimization.add_pi_obj_func(pst,out_pst_name=os.path.join("temp","dewater_pest.piobj.pst"))
print(pst.prior_information.loc["pi_obj_func","equation"])
#pst._update_control_section()
assert pst.control_data.nprior == 1
def fac2real_test():
import os
import numpy as np
import pyemu
# pp_file = os.path.join("utils","points1.dat")
# factors_file = os.path.join("utils","factors1.dat")
# pyemu.utils.gw_utils.fac2real(pp_file,factors_file,
# out_file=os.path.join("utils","test.ref"))
pp_file = os.path.join("utils", "points2.dat")
factors_file = os.path.join("utils", "factors2.dat")
pyemu.geostats.fac2real(pp_file, factors_file,
out_file=os.path.join("temp", "test.ref"))
arr1 = np.loadtxt(os.path.join("utils","fac2real_points2.ref"))
arr2 = np.loadtxt(os.path.join("temp","test.ref"))
#print(np.nansum(np.abs(arr1-arr2)))
#print(np.nanmax(np.abs(arr1-arr2)))
nmax = np.nanmax(np.abs(arr1-arr2))
assert nmax < 0.01
# import matplotlib.pyplot as plt
# diff = (arr1-arr2)/arr1 * 100.0
# diff[np.isnan(arr1)] = np.nan
# p = plt.imshow(diff,interpolation='n')
# plt.colorbar(p)
# plt.show()
def vario_test():
import numpy as np
import pyemu
contribution = 0.1
a = 2.0
for const in [pyemu.utils.geostats.ExpVario,pyemu.utils.geostats.GauVario,
pyemu.utils.geostats.SphVario]:
v = const(contribution,a)
h = v._h_function(np.array([0.0]))
assert h == contribution
h = v._h_function(np.array([a*1000]))
assert h == 0.0
v2 = const(contribution,a,anisotropy=2.0,bearing=90.0)
print(v2._h_function(np.array([a])))
def aniso_test():
import pyemu
contribution = 0.1
a = 2.0
for const in [pyemu.utils.geostats.ExpVario,pyemu.utils.geostats.GauVario,
pyemu.utils.geostats.SphVario]:
v = const(contribution,a)
v2 = const(contribution,a,anisotropy=2.0,bearing=90.0)
v3 = const(contribution,a,anisotropy=2.0,bearing=0.0)
pt0 = (0,0)
pt1 = (1,0)
assert v.covariance(pt0,pt1) == v2.covariance(pt0,pt1)
pt0 = (0,0)
pt1 = (0,1)
assert v.covariance(pt0,pt1) == v3.covariance(pt0,pt1)
def geostruct_test():
import pyemu
v1 = pyemu.utils.geostats.ExpVario(0.1,2.0)
v2 = pyemu.utils.geostats.GauVario(0.1,2.0)
v3 = pyemu.utils.geostats.SphVario(0.1,2.0)
g = pyemu.utils.geostats.GeoStruct(0.2,[v1,v2,v3])
pt0 = (0,0)
pt1 = (0,0)
print(g.covariance(pt0,pt1))
assert g.covariance(pt0,pt1) == 0.5
pt0 = (0,0)
pt1 = (1.0e+10,0)
assert g.covariance(pt0,pt1) == 0.2
def struct_file_test():
import os
import pyemu
structs = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct.dat"))
#print(structs[0])
pt0 = (0,0)
pt1 = (0,0)
for s in structs:
assert s.covariance(pt0,pt1) == s.nugget + \
s.variograms[0].contribution
with open(os.path.join("utils","struct_out.dat"),'w') as f:
for s in structs:
s.to_struct_file(f)
structs1 = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct_out.dat"))
for s in structs1:
assert s.covariance(pt0,pt1) == s.nugget + \
s.variograms[0].contribution
def covariance_matrix_test():
import os
import pandas as pd
import pyemu
pts = pd.read_csv(os.path.join("utils","points1.dat"),delim_whitespace=True,
header=None,names=["name","x","y"],usecols=[0,1,2])
struct = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct.dat"))[0]
struct.variograms[0].covariance_matrix(pts.x,pts.y,names=pts.name)
print(struct.covariance_matrix(pts.x,pts.y,names=pts.name).x)
def setup_ppcov_simple():
import os
import platform
exe_file = os.path.join("utils","ppcov.exe")
print(platform.platform())
if not os.path.exists(exe_file) or not platform.platform().lower().startswith("win"):
print("can't run ppcov setup")
return
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_test.dat")
args1 = [pts_file,'0.0',str_file,"struct1",os.path.join("utils","ppcov.struct1.out"),'','']
args2 = [pts_file,'0.0',str_file,"struct2",os.path.join("utils","ppcov.struct2.out"),'','']
args3 = [pts_file,'0.0',str_file,"struct3",os.path.join("utils","ppcov.struct3.out"),'','']
for args in [args1,args2,args3]:
in_file = os.path.join("utils","ppcov.in")
with open(in_file,'w') as f:
f.write('\n'.join(args))
os.system(exe_file + '<' + in_file)
def ppcov_simple_test():
import os
import numpy as np
import pandas as pd
import pyemu
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_test.dat")
mat1_file = os.path.join("utils","ppcov.struct1.out")
mat2_file = os.path.join("utils","ppcov.struct2.out")
mat3_file = os.path.join("utils","ppcov.struct3.out")
ppc_mat1 = pyemu.Cov.from_ascii(mat1_file)
ppc_mat2 = pyemu.Cov.from_ascii(mat2_file)
ppc_mat3 = pyemu.Cov.from_ascii(mat3_file)
pts = pd.read_csv(pts_file,header=None,names=["name","x","y"],usecols=[0,1,2],
delim_whitespace=True)
struct1,struct2,struct3 = pyemu.utils.geostats.read_struct_file(str_file)
print(struct1)
print(struct2)
print(struct3)
for mat,struct in zip([ppc_mat1,ppc_mat2,ppc_mat3],[struct1,struct2,struct3]):
str_mat = struct.covariance_matrix(x=pts.x,y=pts.y,names=pts.name)
print(str_mat.row_names)
delt = mat.x - str_mat.x
assert np.abs(delt).max() < 1.0e-7
def setup_ppcov_complex():
import os
import platform
exe_file = os.path.join("utils","ppcov.exe")
print(platform.platform())
if not os.path.exists(exe_file) or not platform.platform().lower().startswith("win"):
print("can't run ppcov setup")
return
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_complex.dat")
args1 = [pts_file,'0.0',str_file,"struct1",os.path.join("utils","ppcov.complex.struct1.out"),'','']
args2 = [pts_file,'0.0',str_file,"struct2",os.path.join("utils","ppcov.complex.struct2.out"),'','']
for args in [args1,args2]:
in_file = os.path.join("utils","ppcov.in")
with open(in_file,'w') as f:
f.write('\n'.join(args))
os.system(exe_file + '<' + in_file)
def ppcov_complex_test():
import os
import numpy as np
import pandas as pd
import pyemu
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_complex.dat")
mat1_file = os.path.join("utils","ppcov.complex.struct1.out")
mat2_file = os.path.join("utils","ppcov.complex.struct2.out")
ppc_mat1 = pyemu.Cov.from_ascii(mat1_file)
ppc_mat2 = pyemu.Cov.from_ascii(mat2_file)
pts = pd.read_csv(pts_file,header=None,names=["name","x","y"],usecols=[0,1,2],
delim_whitespace=True)
struct1,struct2 = pyemu.utils.geostats.read_struct_file(str_file)
print(struct1)
print(struct2)
for mat,struct in zip([ppc_mat1,ppc_mat2],[struct1,struct2]):
str_mat = struct.covariance_matrix(x=pts.x,y=pts.y,names=pts.name)
delt = mat.x - str_mat.x
print(mat.x[:,0])
print(str_mat.x[:,0])
print(np.abs(delt).max())
assert np.abs(delt).max() < 1.0e-7
#break
def pp_to_tpl_test():
import os
import pyemu
pp_file = os.path.join("utils","points1.dat")
pp_df = pyemu.pp_utils.pilot_points_to_tpl(pp_file,name_prefix="test_")
print(pp_df.columns)
def tpl_to_dataframe_test():
import os
import pyemu
pp_file = os.path.join("utils","points1.dat")
pp_df = pyemu.pp_utils.pilot_points_to_tpl(pp_file,name_prefix="test_")
df_tpl = pyemu.pp_utils.pp_tpl_to_dataframe(pp_file+".tpl")
assert df_tpl.shape[0] == pp_df.shape[0]
# def to_mps_test():
# import os
# import pyemu
# jco_file = os.path.join("utils","dewater_pest.jcb")
# jco = pyemu.Jco.from_binary(jco_file)
# #print(jco.x)
# pst = pyemu.Pst(jco_file.replace(".jcb",".pst"))
# #print(pst.nnz_obs_names)
# oc_dict = {oc:"l" for oc in pst.nnz_obs_names}
# obj_func = {name:1.0 for name in pst.par_names}
#
# #pyemu.optimization.to_mps(jco=jco_file)
# #pyemu.optimization.to_mps(jco=jco_file,obs_constraint_sense=oc_dict)
# #pyemu.optimization.to_mps(jco=jco_file,obj_func="h00_00")
# decision_var_names = pst.parameter_data.loc[pst.parameter_data.pargp=="q","parnme"].tolist()
# pyemu.optimization.to_mps(jco=jco_file,obj_func=obj_func,decision_var_names=decision_var_names,
# risk=0.975)
def setup_pp_test():
import os
import pyemu
try:
import flopy
except:
return
model_ws = os.path.join("..","examples","Freyberg","extra_crispy")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False)
pp_dir = os.path.join("utils")
#ml.export(os.path.join("temp","test_unrot_grid.shp"))
sr = pyemu.helpers.SpatialReference().from_namfile(
os.path.join(ml.model_ws, ml.namefile),
delc=ml.dis.delc, delr=ml.dis.delr)
sr.rotation = 0.
par_info_unrot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr, prefix_dict={0: "hk1",1:"hk2"},
every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_unrot.shp"),
)
#print(par_info_unrot.parnme.value_counts())
gs = pyemu.geostats.GeoStruct(variograms=pyemu.geostats.ExpVario(a=1000,contribution=1.0))
ok = pyemu.geostats.OrdinaryKrige(gs,par_info_unrot)
ok.calc_factors_grid(sr)
sr2 = pyemu.helpers.SpatialReference.from_gridspec(
os.path.join(ml.model_ws, "test.spc"), lenuni=2)
par_info_drot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr2, prefix_dict={0: ["hk1_", "sy1_", "rch_"]},
every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_unrot.shp"),
)
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(sr2)
par_info_mrot = pyemu.pp_utils.setup_pilotpoints_grid(ml,prefix_dict={0:["hk1_","sy1_","rch_"]},
every_n_cell=2,pp_dir=pp_dir,tpl_dir=pp_dir,
shapename=os.path.join("temp","test_unrot.shp"))
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(ml.sr)
sr.rotation = 15
#ml.export(os.path.join("temp","test_rot_grid.shp"))
#pyemu.gw_utils.setup_pilotpoints_grid(ml)
par_info_rot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr,every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_rot.shp"))
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(sr)
print(par_info_unrot.x)
print(par_info_drot.x)
print(par_info_mrot.x)
print(par_info_rot.x)
def read_hob_test():
import os
import pyemu
hob_file = os.path.join("utils","HOB.txt")
df = pyemu.gw_utils.modflow_hob_to_instruction_file(hob_file)
print(df.obsnme)
def read_pval_test():
import os
import pyemu
pval_file = os.path.join("utils", "meras_trEnhance.pval")
pyemu.gw_utils.modflow_pval_to_template_file(pval_file)
def pp_to_shapefile_test():
import os
import pyemu
try:
import shapefile
except:
print("no pyshp")
return
pp_file = os.path.join("utils","points1.dat")
shp_file = os.path.join("temp","points1.dat.shp")
pyemu.pp_utils.write_pp_shapfile(pp_file)
def write_tpl_test():
import os
import pyemu
tpl_file = os.path.join("utils","test_write.tpl")
in_file = os.path.join("temp","tpl_test.dat")
par_vals = {"q{0}".format(i+1):12345678.90123456 for i in range(7)}
pyemu.pst_utils.write_to_template(par_vals,tpl_file,in_file)
def read_pestpp_runstorage_file_test():
import os
import pyemu
rnj_file = os.path.join("utils","freyberg.rnj")
#rnj_file = os.path.join("..", "..", "verification", "10par_xsec", "master_opt1","pest.rnj")
p1,o1 = pyemu.helpers.read_pestpp_runstorage(rnj_file)
p2,o2 = pyemu.helpers.read_pestpp_runstorage(rnj_file,9)
diff = p1 - p2
diff.sort_values("parval1",inplace=True)
def smp_to_ins_test():
import os
import pyemu
smp = os.path.join("utils","TWDB_wells.smp")
ins = os.path.join('temp',"test.ins")
try:
pyemu.pst_utils.smp_to_ins(smp,ins)
except:
pass
else:
raise Exception("should have failed")
pyemu.smp_utils.smp_to_ins(smp,ins,True)
def master_and_workers():
import shutil
import pyemu
worker_dir = os.path.join("..","verification","10par_xsec","template_mac")
master_dir = os.path.join("temp","master")
if not os.path.exists(master_dir):
os.mkdir(master_dir)
assert os.path.exists(worker_dir)
pyemu.helpers.start_workers(worker_dir,"pestpp","pest.pst",1,
worker_root="temp",master_dir=master_dir)
#now try it from within the master dir
base_cwd = os.getcwd()
os.chdir(master_dir)
pyemu.helpers.start_workers(os.path.join("..","..",worker_dir),
"pestpp","pest.pst",3,
master_dir='.')
os.chdir(base_cwd)
def first_order_pearson_regul_test():
import os
from pyemu import Schur
from pyemu.utils.helpers import first_order_pearson_tikhonov,zero_order_tikhonov
w_dir = "la"
sc = Schur(jco=os.path.join(w_dir,"pest.jcb"))
pt = sc.posterior_parameter
zero_order_tikhonov(sc.pst)
first_order_pearson_tikhonov(sc.pst,pt,reset=False)
print(sc.pst.prior_information)
sc.pst.rectify_pi()
assert sc.pst.control_data.pestmode == "regularization"
sc.pst.write(os.path.join('temp','test.pst'))
def zero_order_regul_test():
import os
import pyemu
pst = pyemu.Pst(os.path.join("pst","inctest.pst"))
pyemu.helpers.zero_order_tikhonov(pst)
print(pst.prior_information)
assert pst.control_data.pestmode == "regularization"
pst.write(os.path.join('temp','test.pst'))
pyemu.helpers.zero_order_tikhonov(pst,reset=False)
assert pst.prior_information.shape[0] == pst.npar_adj * 2
def kl_test():
import os
import numpy as np
import pandas as pd
import pyemu
import matplotlib.pyplot as plt
try:
import flopy
except:
print("flopy not imported...")
return
model_ws = os.path.join("..","verification","Freyberg","extra_crispy")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False)
str_file = os.path.join("..","verification","Freyberg","structure.dat")
arr_tru = np.loadtxt(os.path.join("..","verification",
"Freyberg","extra_crispy",
"hk.truth.ref")) + 20
basis_file = os.path.join("utils","basis.jco")
tpl_file = os.path.join("utils","test.tpl")
factors_file = os.path.join("temp","factors.dat")
num_eig = 100
prefixes = ["hk1"]
df = pyemu.utils.helpers.kl_setup(num_eig=num_eig, sr=ml.sr,
struct=str_file,
factors_file=factors_file,
basis_file=basis_file,
prefixes=prefixes,islog=False)
basis = pyemu.Matrix.from_binary(basis_file)
basis = basis[:,:num_eig]
arr_tru = np.atleast_2d(arr_tru.flatten()).transpose()
proj = np.dot(basis.T.x,arr_tru)[:num_eig]
#proj.autoalign = False
back = np.dot(basis.x, proj)
back = back.reshape(ml.nrow,ml.ncol)
df.parval1 = proj
arr = pyemu.geostats.fac2real(df,factors_file,out_file=None)
fig = plt.figure(figsize=(10, 10))
ax1, ax2 = plt.subplot(121),plt.subplot(122)
mn,mx = arr_tru.min(),arr_tru.max()
print(arr.max(), arr.min())
print(back.max(),back.min())
diff = np.abs(back - arr)
print(diff.max())
assert diff.max() < 1.0e-5
def ok_test():
import os
import pandas as pd
import pyemu
str_file = os.path.join("utils","struct_test.dat")
pts_data = pd.DataFrame({"x":[1.0,2.0,3.0],"y":[0.,0.,0.],"name":["p1","p2","p3"]})
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
interp_points = pts_data.copy()
kf = ok.calc_factors(interp_points.x,interp_points.y)
#for ptname in pts_data.name:
for i in kf.index:
assert len(kf.loc[i,"inames"])== 1
assert kf.loc[i,"ifacts"][0] == 1.0
assert sum(kf.loc[i,"ifacts"]) == 1.0
print(kf)
def ok_grid_test():
try:
import flopy
except:
return
import numpy as np
import pandas as pd
import pyemu
nrow,ncol = 10,5
delr = np.ones((ncol)) * 1.0/float(ncol)
delc = np.ones((nrow)) * 1.0/float(nrow)
num_pts = 0
ptx = np.random.random(num_pts)
pty = np.random.random(num_pts)
ptname = ["p{0}".format(i) for i in range(num_pts)]
pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
pts_data.index = pts_data.name
pts_data = pts_data.loc[:,["x","y","name"]]
sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
str_file = os.path.join("utils","struct_test.dat")
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
kf = ok.calc_factors_grid(sr,verbose=False,var_filename=os.path.join("temp","test_var.ref"),minpts_interp=1)
ok.to_grid_factors_file(os.path.join("temp","test.fac"))
def ok_grid_zone_test():
try:
import flopy
except:
return
import numpy as np
import pandas as pd
import pyemu
nrow,ncol = 10,5
delr = np.ones((ncol)) * 1.0/float(ncol)
delc = np.ones((nrow)) * 1.0/float(nrow)
num_pts = 0
ptx = np.random.random(num_pts)
pty = np.random.random(num_pts)
ptname = ["p{0}".format(i) for i in range(num_pts)]
pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
pts_data.index = pts_data.name
pts_data = pts_data.loc[:,["x","y","name"]]
sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
pts_data.loc[:,"zone"] = 1
pts_data.zone.iloc[1] = 2
print(pts_data.zone.unique())
str_file = os.path.join("utils","struct_test.dat")
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
zone_array = np.ones((nrow,ncol))
zone_array[0,0] = 2
kf = ok.calc_factors_grid(sr,verbose=False,
var_filename=os.path.join("temp","test_var.ref"),
minpts_interp=1,zone_array=zone_array)
ok.to_grid_factors_file(os.path.join("temp","test.fac"))
def ppk2fac_verf_test():
import os
import numpy as np
import pyemu
try:
import flopy
except:
return
ws = os.path.join("..","verification","Freyberg")
gspc_file = os.path.join(ws,"grid.spc")
pp_file = os.path.join(ws,"pp_00_pp.dat")
str_file = os.path.join(ws,"structure.complex.dat")
ppk2fac_facfile = os.path.join(ws,"ppk2fac_fac.dat")
pyemu_facfile = os.path.join("temp","pyemu_facfile.dat")
sr = flopy.utils.SpatialReference.from_gridspec(gspc_file)
ok = pyemu.utils.OrdinaryKrige(str_file,pp_file)
ok.calc_factors_grid(sr,maxpts_interp=10)
ok.to_grid_factors_file(pyemu_facfile)
zone_arr = np.loadtxt(os.path.join(ws,"extra_crispy","ref","ibound.ref"))
pyemu_arr = pyemu.utils.fac2real(pp_file,pyemu_facfile,out_file=None)
ppk2fac_arr = pyemu.utils.fac2real(pp_file,ppk2fac_facfile,out_file=None)
pyemu_arr[zone_arr == 0] = np.NaN
pyemu_arr[zone_arr == -1] = np.NaN
ppk2fac_arr[zone_arr == 0] = np.NaN
ppk2fac_arr[zone_arr == -1] = np.NaN
diff = np.abs(pyemu_arr - ppk2fac_arr)
print(diff)
assert np.nansum(diff) < 1.0e-6,np.nansum(diff)
# def opt_obs_worth():
# import os
# import pyemu
# wdir = os.path.join("utils")
# os.chdir(wdir)
# pst = pyemu.Pst(os.path.join("supply2_pest.fosm.pst"))
# zero_weight_names = [n for n,w in zip(pst.observation_data.obsnme,pst.observation_data.weight) if w == 0.0]
# #print(zero_weight_names)
# #for attr in ["base_jacobian","hotstart_resfile"]:
# # pst.pestpp_options[attr] = os.path.join(wdir,pst.pestpp_options[attr])
# #pst.template_files = [os.path.join(wdir,f) for f in pst.template_files]
# #pst.instruction_files = [os.path.join(wdir,f) for f in pst.instruction_files]
# #print(pst.template_files)
# df = pyemu.optimization.get_added_obs_importance(pst,obslist_dict={"zeros":zero_weight_names})
# os.chdir("..")
# print(df)
def mflist_budget_test():
import pyemu
import os
import pandas as pd
try:
import flopy
except:
print("no flopy...")
return
model_ws = os.path.join("..","examples","Freyberg_transient")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False,load_only=[])
list_filename = os.path.join(model_ws,"freyberg.list")
assert os.path.exists(list_filename)
df = pyemu.gw_utils.setup_mflist_budget_obs(list_filename,start_datetime=ml.start_datetime)
print(df)
times = df.loc[df.index.str.startswith('vol_wells')].index.str.split(
'_', expand=True).get_level_values(2)[::100]
times = pd.to_datetime(times, yearfirst=True)
df = pyemu.gw_utils.setup_mflist_budget_obs(
list_filename, start_datetime=ml.start_datetime, specify_times=times)
flx, vol = pyemu.gw_utils.apply_mflist_budget_obs(
list_filename, 'flux.dat', 'vol.dat', start_datetime=ml.start_datetime,
times='budget_times.config'
)
assert (flx.index == vol.index).all()
assert (flx.index == times).all()
def mtlist_budget_test():
import pyemu
import pandas as pd
import os
try:
import flopy
except:
print("no flopy...")
return
list_filename = os.path.join("utils","mt3d.list")
assert os.path.exists(list_filename)
frun_line,ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename,start_datetime='1-1-1970')
assert len(ins_files) == 2
frun_line,ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename,start_datetime='1-1-1970', gw_prefix='')
assert len(ins_files) == 2
frun_line, ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename, start_datetime=None)
assert len(ins_files) == 2
list_filename = os.path.join("utils", "mt3d_imm_sor.lst")
assert os.path.exists(list_filename)
frun_line, ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename, start_datetime='1-1-1970')
def geostat_prior_builder_test():
import os
import numpy as np
import pyemu
pst_file = os.path.join("pst","pest.pst")
pst = pyemu.Pst(pst_file)
# print(pst.parameter_data)
tpl_file = os.path.join("utils", "pp_locs.tpl")
str_file = os.path.join("utils", "structure.dat")
cov = pyemu.helpers.geostatistical_prior_builder(pst_file,{str_file:tpl_file})
d1 = np.diag(cov.x)
df = pyemu.pp_utils.pp_tpl_to_dataframe(tpl_file)
df.loc[:,"zone"] = np.arange(df.shape[0])
gs = pyemu.geostats.read_struct_file(str_file)
cov = pyemu.helpers.geostatistical_prior_builder(pst_file,{gs:df},
sigma_range=4)
nnz = np.count_nonzero(cov.x)
assert nnz == pst.npar_adj
d2 = np.diag(cov.x)
assert np.array_equiv(d1, d2)
pst.parameter_data.loc[pst.par_names[1:10], "partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
cov = pyemu.helpers.geostatistical_prior_builder(pst, {gs: df},
sigma_range=4)
nnz = np.count_nonzero(cov.x)
assert nnz == pst.npar_adj
ttpl_file = os.path.join("temp", "temp.dat.tpl")
with open(ttpl_file, 'w') as f:
f.write("ptf ~\n ~ temp1 ~\n")
pst.add_parameters(ttpl_file, ttpl_file.replace(".tpl", ""))
pst.parameter_data.loc["temp1", "parubnd"] = 1.1
pst.parameter_data.loc["temp1", "parlbnd"] = 0.9
cov = pyemu.helpers.geostatistical_prior_builder(pst, {str_file: tpl_file})
assert cov.shape[0] == pst.npar_adj
def geostat_draws_test():
import os
import numpy as np
import pyemu
pst_file = os.path.join("pst","pest.pst")
pst = pyemu.Pst(pst_file)
print(pst.parameter_data)
tpl_file = os.path.join("utils", "pp_locs.tpl")
str_file = os.path.join("utils", "structure.dat")
pe = pyemu.helpers.geostatistical_draws(pst_file,{str_file:tpl_file})
assert (pe.shape == pe.dropna().shape)
pst.parameter_data.loc[pst.par_names[1:10], "partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
pe = pyemu.helpers.geostatistical_draws(pst, {str_file: tpl_file})
assert (pe.shape == pe.dropna().shape)
df = pyemu.pp_utils.pp_tpl_to_dataframe(tpl_file)
df.loc[:,"zone"] = np.arange(df.shape[0])
gs = pyemu.geostats.read_struct_file(str_file)
pe = pyemu.helpers.geostatistical_draws(pst_file,{gs:df},
sigma_range=4)
ttpl_file = os.path.join("temp", "temp.dat.tpl")
with open(ttpl_file, 'w') as f:
f.write("ptf ~\n ~ temp1 ~\n")
pst.add_parameters(ttpl_file, ttpl_file.replace(".tpl", ""))
pst.parameter_data.loc["temp1", "parubnd"] = 1.1
pst.parameter_data.loc["temp1", "parlbnd"] = 0.9
pst.parameter_data.loc[pst.par_names[1:10],"partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
pe = pyemu.helpers.geostatistical_draws(pst, {str_file: tpl_file})
assert (pe.shape == pe.dropna().shape)
# def linearuniversal_krige_test():
# try:
# import flopy
# except:
# return
#
# import numpy as np
# import pandas as pd
# import pyemu
# nrow,ncol = 10,5
# delr = np.ones((ncol)) * 1.0/float(ncol)
# delc = np.ones((nrow)) * 1.0/float(nrow)
#
# num_pts = 0
# ptx = np.random.random(num_pts)
# pty = np.random.random(num_pts)
# ptname = ["p{0}".format(i) for i in range(num_pts)]
# pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
# pts_data.index = pts_data.name
# pts_data = pts_data.loc[:,["x","y","name"]]
#
#
# sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
# pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
# pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
# pts_data.loc["i0j0","value"] = 1.0
# pts_data.loc["imxjmx","value"] = 0.0
#
# str_file = os.path.join("utils","struct_test.dat")
# gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
# luk = pyemu.utils.geostats.LinearUniversalKrige(gs,pts_data)
# df = luk.estimate_grid(sr,verbose=True,
# var_filename=os.path.join("utils","test_var.ref"),
# minpts_interp=1)
def gslib_2_dataframe_test():
import os
import pyemu
gslib_file = os.path.join("utils","ch91pt.shp.gslib")
df = pyemu.geostats.gslib_2_dataframe(gslib_file)
print(df)
def sgems_to_geostruct_test():
import os
import pyemu
xml_file = os.path.join("utils", "ch00")
gs = pyemu.geostats.read_sgems_variogram_xml(xml_file)
def load_sgems_expvar_test():
import os
import numpy as np
#import matplotlib.pyplot as plt
import pyemu
dfs = pyemu.geostats.load_sgems_exp_var(os.path.join("utils","ch00_expvar"))
xmn,xmx = 1.0e+10,-1.0e+10
for d,df in dfs.items():
xmn = min(xmn,df.x.min())
xmx = max(xmx,df.x.max())
xml_file = os.path.join("utils", "ch00")
gs = pyemu.geostats.read_sgems_variogram_xml(xml_file)
v = gs.variograms[0]
#ax = gs.plot(ls="--")
#plt.show()
#x = np.linspace(xmn,xmx,100)
#y = v.inv_h(x)
#
#plt.plot(x,y)
#plt.show()
def read_hydmod_test():
import os
import numpy as np
import pandas as pd
import pyemu
try:
import flopy
except:
return
df, outfile = pyemu.gw_utils.modflow_read_hydmod_file(os.path.join('utils','freyberg.hyd.bin'),
os.path.join('temp','freyberg.hyd.bin.dat'))
df = pd.read_csv(os.path.join('temp', 'freyberg.hyd.bin.dat'), delim_whitespace=True)
dftrue = pd.read_csv(os.path.join('utils', 'freyberg.hyd.bin.dat.true'), delim_whitespace=True)
assert np.allclose(df.obsval.values, dftrue.obsval.values)
def make_hydmod_insfile_test():
import os
import shutil
import pyemu
try:
import flopy
except:
return
shutil.copy2(os.path.join('utils','freyberg.hyd.bin'),os.path.join('temp','freyberg.hyd.bin'))
pyemu.gw_utils.modflow_hydmod_to_instruction_file(os.path.join('temp','freyberg.hyd.bin'))
#assert open(os.path.join('utils','freyberg.hyd.bin.dat.ins'),'r').read() == open('freyberg.hyd.dat.ins', 'r').read()
assert os.path.exists(os.path.join('temp','freyberg.hyd.bin.dat.ins'))
def plot_summary_test():
import os
import pandas as pd
import pyemu
try:
import matplotlib.pyplot as plt
except:
return
par_df = pd.read_csv(os.path.join("utils","freyberg_pp.par.usum.csv"),
index_col=0)
idx = list(par_df.index.map(lambda x: x.startswith("HK")))
par_df = par_df.loc[idx,:]
ax = pyemu.plot_utils.plot_summary_distributions(par_df,label_post=True)
plt.savefig(os.path.join("temp","hk_par.png"))
plt.close()
df = os.path.join("utils","freyberg_pp.pred.usum.csv")
figs,axes = pyemu.plot_utils.plot_summary_distributions(df,subplots=True)
#plt.show()
for i,fig in enumerate(figs):
plt.figure(fig.number)
plt.savefig(os.path.join("temp","test_pred_{0}.png".format(i)))
plt.close(fig)
df = os.path.join("utils","freyberg_pp.par.usum.csv")
figs, axes = pyemu.plot_utils.plot_summary_distributions(df,subplots=True)
for i,fig in enumerate(figs):
plt.figure(fig.number)
plt.savefig(os.path.join("temp","test_par_{0}.png".format(i)))
plt.close(fig)
def hds_timeseries_test():
import os
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except:
return
import pyemu
model_ws =os.path.join("..","examples","Freyberg_transient")
org_hds_file = os.path.join(model_ws, "freyberg.hds")
hds_file = os.path.join("temp", "freyberg.hds")
org_cbc_file = org_hds_file.replace(".hds",".cbc")
cbc_file = hds_file.replace(".hds", ".cbc")
shutil.copy2(org_hds_file, hds_file)
shutil.copy2(org_cbc_file, cbc_file)
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws, check=False)
kij_dict = {"test1": [0, 0, 0], "test2": (1, 1, 1), "test": (0, 10, 14)}
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True)
# m.change_model_ws("temp",reset_external=True)
# m.write_input()
# pyemu.os_utils.run("mfnwt freyberg.nam",cwd="temp")
cmd, df1 = pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, include_path=True, prefix="stor",
text="storage", fill=0.0)
cmd,df2 = pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="stor",
text="storage",fill=0.0)
print(df1)
d = np.abs(df1.obsval.values - df2.obsval.values)
print(d.max())
assert d.max() == 0.0,d
try:
pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="consthead",
text="constant head")
except:
pass
else:
raise Exception("should have failed")
try:
pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="consthead",
text="JUNK")
except:
pass
else:
raise Exception("should have failed")
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True,prefix="hds")
m = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,load_only=[],check=False)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict,model=m,include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True,prefix="hds")
org_hds_file = os.path.join("utils", "MT3D001.UCN")
hds_file = os.path.join("temp", "MT3D001.UCN")
shutil.copy2(org_hds_file, hds_file)
kij_dict = {"test1": [0, 0, 0], "test2": (1, 1, 1)}
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True, prefix="hds")
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws, load_only=[], check=False)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True, prefix="hds")
# df1 = pd.read_csv(out_file, delim_whitespace=True)
# pyemu.gw_utils.apply_hds_obs(hds_file)
# df2 = pd.read_csv(out_file, delim_whitespace=True)
# diff = df1.obsval - df2.obsval
def grid_obs_test():
import os
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except:
return
import pyemu
m_ws = os.path.join("..", "examples", "freyberg_sfr_update")
org_hds_file = os.path.join("..","examples","Freyberg_Truth","freyberg.hds")
org_multlay_hds_file = os.path.join(m_ws, "freyberg.hds") # 3 layer version
org_ucn_file = os.path.join(m_ws, "MT3D001.UCN") # mt example
hds_file = os.path.join("temp","freyberg.hds")
multlay_hds_file = os.path.join("temp", "freyberg_3lay.hds")
ucn_file = os.path.join("temp", "MT3D001.UCN")
out_file = hds_file+".dat"
multlay_out_file = multlay_hds_file+".dat"
ucn_out_file = ucn_file+".dat"
shutil.copy2(org_hds_file,hds_file)
shutil.copy2(org_multlay_hds_file, multlay_hds_file)
shutil.copy2(org_ucn_file, ucn_file)
pyemu.gw_utils.setup_hds_obs(hds_file)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert abs(diff.max()) < 1.0e-6, abs(diff.max())
pyemu.gw_utils.setup_hds_obs(multlay_hds_file)
df1 = pd.read_csv(multlay_out_file,delim_whitespace=True)
assert len(df1) == 3*len(df2), "{} != 3*{}".format(len(df1), len(df2))
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval,df2.obsval), abs(diff.max())
pyemu.gw_utils.setup_hds_obs(hds_file,skip=-999)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert diff.max() < 1.0e-6
pyemu.gw_utils.setup_hds_obs(ucn_file, skip=1.e30, prefix='ucn')
df1 = pd.read_csv(ucn_out_file, delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(ucn_file)
df2 = pd.read_csv(ucn_out_file, delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
# skip = lambda x : x < -888.0
skip = lambda x: x if x > -888.0 else np.NaN
pyemu.gw_utils.setup_hds_obs(hds_file,skip=skip)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = | pd.read_csv(out_file,delim_whitespace=True) | pandas.read_csv |
"""
Functions for converting object to other types
"""
import numpy as np
import pandas as pd
from pandas.core.common import (_possibly_cast_to_datetime, is_object_dtype,
isnull)
import pandas.lib as lib
# TODO: Remove in 0.18 or 2017, whichever is sooner
def _possibly_convert_objects(values, convert_dates=True, convert_numeric=True,
convert_timedeltas=True, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
# if we have passed in a list or scalar
if isinstance(values, (list, tuple)):
values = np.array(values, dtype=np.object_)
if not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
# convert dates
if convert_dates and values.dtype == np.object_:
# we take an aggressive stance and convert to datetime64[ns]
if convert_dates == 'coerce':
new_values = _possibly_cast_to_datetime(values, 'M8[ns]',
errors='coerce')
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(values,
convert_datetime=convert_dates)
# convert timedeltas
if convert_timedeltas and values.dtype == np.object_:
if convert_timedeltas == 'coerce':
from pandas.tseries.timedeltas import to_timedelta
new_values = | to_timedelta(values, coerce=True) | pandas.tseries.timedeltas.to_timedelta |
import pandas as pd # DataFrame Library
import tensorflow as tf # Tensorflow, library to develop and train ML models
import matplotlib.pyplot as plt # Plotting Library
from Models.myanfis import ANFIS # ANFIS model from: https://github.com/gregorLen/AnfisTensorflow2.0
from Models.myanfis import fis_parameters # Model Configuration class
from sklearn.utils import shuffle # For shuffling the dataset
from sklearn.decomposition import PCA # For dimensionality reduction
from sklearn.model_selection import KFold # k-fold Cross-Validation
from sklearn.preprocessing import MinMaxScaler # For converting negative ranges into [0,1] range
# Read the dataset, shuffle it, get the first 1000 sample of it
df = shuffle( | pd.read_csv("winequality-red.csv") | pandas.read_csv |
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team and 2021 Zilliz.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import torch
from pandas import Series
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms
from typing import Tuple
class PyTorchImageDataset(Dataset):
"""
PyTorchImageDataset is a dataset class for training.
Args:
image_path (:obj:`str`):
Path to the images for your dataset.
label_file (:obj:`str`):
Path to your label file. The label file should be a csv file. The columns in this file should be
[image_name, category]: 'image_name' is the path of the image and 'category' is the label of the
corresponding image. For example: [image_name, dog] for one row. Note that the first row should be [image_name, category]
data_transform (:obj:`Compose`):
PyTorch transform of the input images.
"""
def __init__(self, image_path: str, label_file: str, data_transform: transforms.Compose = None):
self.image_path = image_path
self.label_file = label_file
self.data_transform = data_transform
df = | pd.read_csv(self.label_file) | pandas.read_csv |
"""Unittests for the map module."""
import unittest
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pygeos
import pyproj
import geopandas as gpd
import shapely.wkt
import numpy.testing as npt
import gnssmapper.common as cm
import gnssmapper.geo as geo
class TestObservationMethods(unittest.TestCase):
def setUp(self):
self.rays = gpd.GeoSeries([shapely.geometry.LineString([[527990, 183005, 0], [528020, 183005, 15]]),
shapely.geometry.LineString([[527990, 183005, 10], [528020, 183005, 25]])],
crs="epsg:27700")
def test_rays(self) -> None:
r = [[0, 0, 0], [1, 1, 1]]
s = [[10000, 0, 0],[10001, 1, 1]]
expected = [pygeos.Geometry("LineString (0 0 0,1000 0 0)"), pygeos.Geometry("LineString (1 1 1,1001 1 1)")]
out=geo.rays(r,s)
self.assertTrue(np.all(pygeos.predicates.equals(out,expected)))
def test_to_crs(self) -> None:
target = pyproj.crs.CRS(cm.constants.epsg_wgs84)
transformed= geo.to_crs(self.rays,target)
self.assertTrue(np.all(s.has_z for s in transformed))
self.assertEqual(target,transformed.crs)
df = gpd.GeoDataFrame(geometry = self.rays,crs=self.rays.crs)
transformed_df = geo.to_crs(df,target)
self.assertTrue(np.all(s.has_z for s in transformed_df.geometry))
self.assertEqual(target,transformed_df.crs)
class TestShapelyMethods(unittest.TestCase):
def setUp(self):
self.building = shapely.wkt.loads("POLYGON((528010 183010, 528010 183000,528000 183000, 528000 183010,528010 183010))")
def test_intersection(self):
five = shapely.geometry.LineString([[527990,183005,0],[528020,183005,15]])
point = geo.intersection([five],[self.building],[10])
self.assertAlmostEqual(np.array(point[0])[2],5)
def test_intersection_projected(self):
fifteen = shapely.geometry.LineString([[527990,183005,10],[528020,183005,25]])
point = geo.intersection_projected([fifteen], [self.building])
npt.assert_array_almost_equal(np.array(list(point)[0].coords).flatten(), [528000, 183005, 15])
inside = shapely.geometry.LineString([[528005,183005,10],[528020,183005,25]])
inside_point = geo.intersection_projected([inside], [self.building])
npt.assert_array_almost_equal(np.array(list(inside_point)[0].coords).flatten(), [528010, 183005, 15])
outside = shapely.geometry.LineString([[527990,183015,10],[528020,183015,25]])
outside_point = geo.intersection_projected([outside], [self.building])
self.assertTrue(list(outside_point)[0].is_empty)
empty = shapely.geometry.LineString()
empty_point = geo.intersection_projected([empty], [self.building])
self.assertTrue(list(empty_point)[0].is_empty)
def test_intersection_projected_height(self):
fifteen = shapely.geometry.LineString([[527990,183005,10],[528020,183005,25]])
point = geo.intersection_projected_height([fifteen],[self.building])
self.assertAlmostEqual(point[0],15)
def test_intersects(self):
five = shapely.geometry.LineString([[527990, 183005, 0], [528020, 183005, 15]])
fifteen = shapely.geometry.LineString([[527990, 183005, 10], [528020, 183005, 25]])
rays = [five, fifteen]
buildings = [self.building, self.building]
heights=[10,10]
npt.assert_array_almost_equal(geo.intersects(rays,buildings,heights),[True,False])
class TestFresnel(unittest.TestCase):
def setUp(self):
self.buildings = [shapely.wkt.loads("POLYGON((528010 183010, 528010 183000,528000 183000, 528000 183010,528010 183010))")]
def test_fresnel_integral(self):
v=np.array([-1,0,1,2.4])
o=np.array([-20*np.log(1.12),-20*np.log(0.5),-20*np.log(0.4-(0.1184-0.28**2)**0.5),-20*np.log(0.225/2.4)])
npt.assert_almost_equal(geo.fresnel_integral(v),o)
def test_fresnel_parameter(self):
five = shapely.geometry.LineString([[527990,183005,5],[528020,183005,5]])
point = shapely.geometry.Point([528000,183005,7])
expected= 2 *( 2 / (0.1903 * 10))**0.5
self.assertAlmostEqual(geo.fresnel_parameter([five],[point])[0],expected)
def test_get_fresnel_single(self):
five = shapely.geometry.LineString([[527990,183005,0],[528020,183005,15]])
expected=geo.fresnel_integral([5 *( 2 / (0.1903 * 10))**0.5])
self.assertAlmostEqual(geo.get_fresnel(five,self.buildings,[10]),expected[0])
def test_get_fresnel_multi(self):
# not tested yet
pass
class TestMapMethods(unittest.TestCase):
def setUp(self):
self.map_box = gpd.GeoDataFrame({'height': [10]},
geometry=[shapely.wkt.loads("POLYGON((528010 183010, 528010 183000,528000 183000, 528000 183010,528010 183010))")],
crs="epsg:27700",index=[1])
self.map_canyon =gpd.GeoDataFrame({'height': [10,10]},
geometry=list(shapely.wkt.loads("MULTIPOLYGON(((528010 183010, 528010 183000,528000 183000, 528000 183010,528010 183010)),((528030 183010, 528030 183000,528020 183000, 528020 183010,528030 183010)))")),
crs="epsg:27700",index=[3,4])
self.rays_box = gpd.GeoSeries([shapely.geometry.LineString([[527990, 183005, 0], [528020, 183005, 15]]),
shapely.geometry.LineString([[527990, 183005, 10], [528020, 183005, 25]])],
crs="epsg:27700",index=[1,2])
self.rays_canyon = gpd.GeoSeries([shapely.geometry.LineString([(527990, 183005, 5), (528015, 183005, 5)]),
shapely.geometry.LineString([(528015, 183005, 9), (528035, 183005, 9)])],
crs="epsg:27700",index=[1,2])
def test_map_to_crs(self):
output = geo.map_to_crs(self.map_box, cm.constants.epsg_wgs84)
cm.check.check_type(output,'map',raise_errors=True)
same = geo.map_to_crs(self.map_box, "epsg:27700")
pdt.assert_frame_equal(self.map_box,same,check_dtype=False)
reverted = geo.map_to_crs(output, "epsg:27700")
reverted=reverted.set_geometry(pygeos.geometry.set_precision(reverted.geometry.array.data,1))
| pdt.assert_frame_equal(self.map_box,reverted,check_dtype=False,atol=0.1,rtol=0.1) | pandas.testing.assert_frame_equal |
# networkx experimentation and link graph plotting tests
# not in active use for the search engine but left here for reference
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import sqlite3
from nltk import FreqDist
from networkx.drawing.nx_agraph import graphviz_layout
import spacy
nlp = spacy.load("en_core_web_md")
def plot_keyword_frequency():
connection = sqlite3.connect("search.db")
cursor = connection.cursor()
keywords = cursor.execute("SELECT keywords FROM posts;").fetchall()
all_keywords = []
for k in keywords:
for l in k[0].split(", "):
all_keywords.append(l)
distribution = FreqDist(all_keywords)
print(distribution.most_common(20))
distribution.plot(20)
def plot_linkrot_by_date():
plt.figure()
plt.title("Linkrot by date")
plt.xlabel("Date")
plt.ylabel("Number of URLs")
pd.read_csv("data/external_link_status.csv").groupby(["status_code"]).count()["url"].plot(kind="line")
pd.read_csv("data/external_link_status.csv").groupby(["status_code"]).count()["url"].plot(kind="line")
# df = df[df["status_code"] == 200]
# df.plot(kind="line", x="date", y="status_code")
plt.show()
plt.savefig("charts/linkrot_by_date.png")
def keyword_knowledge_graph():
keywords = []
G = nx.Graph()
connection = sqlite3.connect("search.db")
cursor = connection.cursor()
rows = cursor.execute("SELECT keywords, url FROM posts;").fetchall()
count = 0
for r in rows:
if count == 150:
break
post_keywords = []
# for keyword in r[0].split(", "):
# # if "what is" in keyword:
# G.add_node(keyword)
# if len(post_keywords) > 0:
# G.add_edge(post_keywords[0], keyword)
for keyword in r[0].split(", "):
keyword = keyword.replace("the", "").replace("this", "")
post_keywords.append(keyword)
G.add_node(keyword)
# if not keyword.islower():
# G.add_edge("proper noun", keyword)
for k in post_keywords:
G.add_edge(post_keywords[0], k)
count += 1
nx.draw(G, with_labels=True)
plt.plot()
plt.show()
print([n for n in G.neighbors("coffee") if "coffee" in n.lower() and n.islower()][:7])
# get coffee edge
# for n in G.neighbors(to_check):
# print(n)
# print(nlp(n).similarity(nlp(to_check)))
# if nlp(n).similarity(nlp(to_check)):
# print(nlp(n).similarity(nlp(to_check)))
#plt.show()
return G
def show_error_codes():
df = pd.read_csv("data/external_link_status.csv")
plt.suptitle("Error codes returned by external links (n={})".format(len(df)))
plt.xlabel("Error code")
plt.ylabel("Number of URLs")
df.groupby("status_code").count()["url"].plot(kind="bar")
plt.show()
plt.save("static/error_codes.png")
def get_internal_link_count():
# Read out.csv into dataframe
df = | pd.read_csv("data/all_links.csv") | pandas.read_csv |
import logging
from typing import List
import numpy as np
import pandas as pd
from cuchemcommon.data import GenerativeWfDao
from cuchemcommon.data.generative_wf import ChemblGenerativeWfDao
from cuchemcommon.fingerprint import Embeddings
from cuchemcommon.utils.singleton import Singleton
from cuchemcommon.workflow import BaseGenerativeWorkflow
from cuchem.utils.data_peddler import download_cddd_models
logger = logging.getLogger(__name__)
class Cddd(BaseGenerativeWorkflow, metaclass=Singleton):
def __init__(self, dao: GenerativeWfDao = ChemblGenerativeWfDao(None)) -> None:
super().__init__(dao)
self.default_model_loc = download_cddd_models()
self.dao = dao
self.cddd_embeddings = Embeddings(model_dir=self.default_model_loc)
self.min_jitter_radius = 0.5
def smiles_to_embedding(self, smiles: str, padding: int):
embedding = self.cddd_embeddings.func.seq_to_emb(smiles).squeeze()
return embedding
def embedding_to_smiles(self,
embedding,
dim: int,
pad_mask):
return self.cddd_embeddings.inverse_transform(embedding)
def find_similars_smiles_list(self,
smiles: str,
num_requested: int = 10,
scaled_radius=None,
force_unique=False):
radius = self._compute_radius(scaled_radius)
embedding = self.cddd_embeddings.func.seq_to_emb(smiles).squeeze()
embeddings = self.addjitter(embedding, radius, cnt=num_requested)
neighboring_embeddings = np.concatenate([embedding.reshape(1, embedding.shape[0]),
embeddings])
embeddings = [embedding] + embeddings
return self.cddd_embeddings.inverse_transform(neighboring_embeddings), embeddings
def find_similars_smiles(self,
smiles: str,
num_requested: int = 10,
scaled_radius=None,
force_unique=False):
generated_mols, neighboring_embeddings = self.find_similars_smiles_list(smiles,
num_requested=num_requested,
scaled_radius=scaled_radius,
force_unique=force_unique)
dims = []
for neighboring_embedding in neighboring_embeddings:
dims.append(neighboring_embedding.shape)
generated_df = pd.DataFrame({'SMILES': generated_mols,
'embeddings': neighboring_embeddings,
'embeddings_dim': dims,
'Generated': [True for i in range(len(generated_mols))]})
generated_df.iat[0, 2] = False
if force_unique:
generated_df = self.compute_unique_smiles(generated_df,
self.cddd_embeddings.inverse_transform,
scaled_radius=scaled_radius)
return generated_df
def interpolate_smiles(self,
smiles: List,
num_points: int = 10,
scaled_radius=None,
force_unique=False):
num_points = int(num_points) + 2
if len(smiles) < 2:
raise Exception('At-least two or more smiles are expected')
def linear_interpolate_points(embedding, num_points):
return np.linspace(embedding[0], embedding[1], num_points)
result_df = []
for idx in range(len(smiles) - 1):
data = | pd.DataFrame({'transformed_smiles': [smiles[idx], smiles[idx + 1]]}) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# #### Importing dataset
# 1. Since the data comes as an Excel file, we have to use pandas read_excel to load it
# 2. After loading, it is important to check for null values in each column and row
# 3. If null values are present, we can either:
# a. Fill the NaN values with the mean, median or mode using the fillna() method, or
# b. Drop the affected rows if there are only a few missing values (a short sketch of both follows below)
#
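# A minimal, illustrative sketch of the two options above (not part of the original
# notebook); the _demo_ names and values are made up.
_demo_df = pd.DataFrame({"Price": [100.0, None, 120.0]})
_demo_filled = _demo_df["Price"].fillna(_demo_df["Price"].mean()) # option a: impute with the mean
_demo_dropped = _demo_df.dropna() # option b: drop the rows with missing values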
# In[2]:
train_data=pd.read_excel(r'E:\End-2-end Projects\Flight_Price/Data_Train.xlsx') # raw string so backslashes in the Windows path are not treated as escapes
# In[3]:
train_data.head()
# In[4]:
train_data.info()
# In[5]:
train_data.isnull().sum()
# #### Since there are only a few missing values, we can drop those rows directly
# In[6]:
train_data.dropna(inplace=True)
# In[7]:
train_data.isnull().sum()
# In[8]:
train_data.dtypes
# In[ ]:
# #### From the description we can see that Date_of_Journey is an object data type.
# Therefore, we have to convert this column into a timestamp to use it properly for prediction, because our
# model will not be able to understand these string values; it only understands numeric/timestamp data
# For this we require pandas to_datetime to convert object data type to datetime dtype.
#
#
# dt.day method will extract only day of that date
# dt.month method will extract only month of that date
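# A minimal, illustrative sketch of pd.to_datetime and the .dt accessor (not part of
# the original notebook); the _demo_ names and dates are made up.
_demo_dates = pd.to_datetime(pd.Series(["24/03/2019", "1/05/2019"]), dayfirst=True)
_demo_days = _demo_dates.dt.day # -> 24, 1
_demo_months = _demo_dates.dt.month # -> 3, 5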
# In[9]:
def change_into_datetime(col):
train_data[col]=pd.to_datetime(train_data[col])
# In[10]:
train_data.columns
# In[11]:
for i in ['Date_of_Journey','Dep_Time', 'Arrival_Time']:
change_into_datetime(i)
# In[12]:
train_data.dtypes
# In[ ]:
# In[ ]:
# In[13]:
train_data['Journey_day']=train_data['Date_of_Journey'].dt.day
# In[14]:
train_data['Journey_month']=train_data['Date_of_Journey'].dt.month
# In[15]:
train_data.head()
# In[ ]:
# In[16]:
## Since we have extracted Journey_day and Journey_month as integers, we can now drop Date_of_Journey as it is of no further use.
train_data.drop('Date_of_Journey', axis=1, inplace=True)
# In[ ]:
# In[ ]:
# In[17]:
train_data.head()
# In[ ]:
# In[18]:
def extract_hour(df,col):
df[col+"_hour"]=df[col].dt.hour
# In[19]:
def extract_min(df,col):
df[col+"_minute"]=df[col].dt.minute
# In[20]:
def drop_column(df,col):
df.drop(col,axis=1,inplace=True)
# In[ ]:
# In[21]:
# Departure time is when a plane leaves the gate.
# Similar to Date_of_Journey we can extract values from Dep_Time
extract_hour(train_data,'Dep_Time')
# In[22]:
# Extracting Minutes
extract_min(train_data,'Dep_Time')
# In[23]:
# Now we can drop Dep_Time as it is of no use
drop_column(train_data,'Dep_Time')
# In[24]:
train_data.head()
# In[ ]:
# In[25]:
# Arrival time is when the plane pulls up to the gate.
# Similar to Date_of_Journey we can extract values from Arrival_Time
# Extracting Hours
extract_hour(train_data,'Arrival_Time')
# Extracting minutes
extract_min(train_data,'Arrival_Time')
# Now we can drop Arrival_Time as it is of no use
drop_column(train_data,'Arrival_Time')
# In[26]:
train_data.head()
# In[ ]:
# In[27]:
'2h 50m'.split(' ')
# In[ ]:
# #### Let's apply pre-processing on the Duration column: separate the hours and minutes parts of each duration
# In[28]:
duration=list(train_data['Duration'])
for i in range(len(duration)):
if len(duration[i].split(' '))==2:
pass
else:
if 'h' in duration[i]: # Check if duration contains only hours
duration[i]=duration[i] + ' 0m' # Add 0 minutes
else:
duration[i]='0h '+ duration[i] # if duration contains only minutes, add 0 hours
# In[29]:
train_data['Duration']=duration
# In[30]:
train_data.head()
# In[31]:
'2h 50m'.split(' ')[1][0:-1]
# In[ ]:
# In[32]:
def hour(x):
return x.split(' ')[0][0:-1]
# In[33]:
def minute(x): # renamed so it does not shadow the built-in min()
return x.split(' ')[1][0:-1]
# In[34]:
train_data['Duration_hours']=train_data['Duration'].apply(hour)
train_data['Duration_mins']=train_data['Duration'].apply(minute)
# In[35]:
train_data.head()
# In[36]:
train_data.drop('Duration',axis=1,inplace=True)
# In[37]:
train_data.head()
# In[38]:
train_data.dtypes
# In[39]:
train_data['Duration_hours']=train_data['Duration_hours'].astype(int)
train_data['Duration_mins']=train_data['Duration_mins'].astype(int)
# In[40]:
train_data.dtypes
# In[41]:
train_data.head()
# In[42]:
train_data.dtypes
# In[43]:
cat_col=[col for col in train_data.columns if train_data[col].dtype=='O']
cat_col
# In[44]:
cont_col=[col for col in train_data.columns if train_data[col].dtype!='O']
cont_col
# ### Handling Categorical Data
#
# #### We are using 2 main Encoding Techniques to convert Categorical data into some numerical format
# Nominal data --> data are not in any order --> OneHotEncoder is used in this case
# Ordinal data --> data are in order --> LabelEncoder is used in this case
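# A minimal, illustrative sketch of both techniques on made-up values (not part of the
# original notebook); note that LabelEncoder assigns codes alphabetically, so an explicit
# mapping is often preferred when the true ordinal order matters.
from sklearn.preprocessing import LabelEncoder
_demo_nominal = pd.get_dummies(pd.Series(["IndiGo", "Air India", "IndiGo"]), drop_first=True) # one-hot for nominal data
_demo_ordinal = LabelEncoder().fit_transform(["non-stop", "1 stop", "2 stops"]) # integer codes for ordinal data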
# In[45]:
categorical=train_data[cat_col]
categorical.head()
# In[46]:
categorical['Airline'].value_counts()
# In[ ]:
# #### Airline vs Price Analysis
# In[47]:
plt.figure(figsize=(15,5))
sns.boxplot(y='Price',x='Airline',data=train_data.sort_values('Price',ascending=False))
# In[ ]:
# ##### Conclusion --> From the graph we can see that Jet Airways Business has the highest price; apart from that airline, almost all carriers have a similar median
# In[ ]:
# #### Perform Total_Stops vs Price Analysis
# In[48]:
plt.figure(figsize=(15,5))
sns.boxplot(y='Price',x='Total_Stops',data=train_data.sort_values('Price',ascending=False))
# In[49]:
len(categorical['Airline'].unique())
# In[50]:
# As Airline is Nominal Categorical data we will perform OneHotEncoding
Airline=pd.get_dummies(categorical['Airline'], drop_first=True)
Airline.head()
# In[51]:
categorical['Source'].value_counts()
# In[52]:
# Source vs Price
plt.figure(figsize=(15,5))
sns.catplot(y='Price',x='Source',data=train_data.sort_values('Price',ascending=False),kind='boxen')
# In[53]:
# As Source is Nominal Categorical data we will perform OneHotEncoding
Source=pd.get_dummies(categorical['Source'], drop_first=True)
Source.head()
# In[54]:
categorical['Destination'].value_counts()
# In[55]:
# As Destination is Nominal Categorical data we will perform OneHotEncoding
Destination= | pd.get_dummies(categorical['Destination'], drop_first=True) | pandas.get_dummies |
import zipfile
import os
import numpy as np
import pandas as pd
from pathlib import Path
__version__ = '0.155'
try:
from functools import lru_cache
except (ImportError, AttributeError):
# don't know how to tell setup.py that we only need functools32 when under 2.7.
# so we'll just include a copy (*bergh*)
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "functools32"))
from functools32 import lru_cache
class WideNotSupported(ValueError):
def __init__(self):
self.message = (
".get_wide() is not supported for this dataset. Use .get_dataset() instead"
)
class CantApplyExclusion(ValueError):
pass
datasets_to_cache = 32
known_compartment_columns = [
"compartment",
"cell_type",
"disease",
"culture_method", # for those cells we can't take into sequencing ex vivo
# these are only for backward compability
"tissue",
"disease-state",
] # tissue
def lazy_member(field):
"""Evaluate a function once and store the result in the member (an object specific in-memory cache)
Beware of using the same name in subclasses!
"""
def decorate(func):
if field == func.__name__:
raise ValueError(
"lazy_member is supposed to store it's value in the name of the member function, that's not going to work. Please choose another name (prepend an underscore..."
)
def doTheThing(*args, **kw):
if not hasattr(args[0], field):
setattr(args[0], field, func(*args, **kw))
return getattr(args[0], field)
return doTheThing
return decorate
class Biobank(object):
"""An interface to a dump of our Biobank.
Also used internally by the biobank website to access the data.
In essence, a souped up dict of pandas dataframes stored
as pickles in a zip file with memory caching"""
def __init__(self, filename):
self.filename = filename
self.zf = zipfile.ZipFile(filename)
if not "_meta/_data_format" in self.zf.namelist():
self.data_format = "msg_pack"
else:
with self.zf.open("_meta/_data_format") as op:
self.data_format = op.read().decode("utf-8")
if self.data_format not in ("msg_pack", "parquet"):
raise ValueError(
"Unexpected data format (%s). Do you need to update marburg_biobank"
% (self.data_format)
)
self._cached_datasets = {}
@property
def tall(self):
return _BiobankItemAccessor(self.list_datasets, lambda dataset: self.get_dataset(dataset, apply_exclusion=True))
@property
def wide(self):
return _BiobankItemAccessor(self.list_datasets, lambda dataset: self.get_wide(dataset, apply_exclusion=True))
def get_all_patients(self):
df = self.get_dataset("_meta/patient_compartment_dataset")
return set(df["patient"].unique())
def number_of_patients(self):
"""How many patients/indivuums are in all datasets?"""
return len(self.get_all_patients())
def number_of_datasets(self):
"""How many different datasets do we have"""
return len(self.list_datasets())
def get_compartments(self):
"""Get all compartments we have data for"""
pcd = self.get_dataset("_meta/patient_compartment_dataset")
return pcd
@lru_cache(datasets_to_cache)
def get_dataset_compartments(self, dataset):
"""Get available compartments in dataset @dataset"""
ds = self.get_dataset(dataset)
columns = self.get_dataset_compartment_columns(dataset)
if not columns:
return []
else:
sub_ds = ds[columns]
sub_ds = sub_ds[~sub_ds.duplicated()]
result = []
for dummy_idx, row in sub_ds.iterrows():
result.append(tuple([row[x] for x in columns]))
return set(result)
@lru_cache(datasets_to_cache)
def get_dataset_compartment_columns(self, dataset):
"""Get available compartments columns in dataset @dataset"""
ds = self.get_dataset(dataset)
columns = [
x for x in known_compartment_columns if x in ds.columns
] # compartment included for older datasets
return columns
@lru_cache(datasets_to_cache)
def get_variables_and_units(self, dataset):
"""What variables are availabe in a dataset?"""
df = self.get_dataset(dataset)
if len(df["unit"].cat.categories) == 1:
vars = df["variable"].unique()
unit = df["unit"].iloc[0]
return set([(v, unit) for v in vars])
else:
x = df[["variable", "unit"]].drop_duplicates(["variable", "unit"])
return set(zip(x["variable"], x["unit"]))
def get_possible_values(self, dataset, variable, unit):
df = self.get_dataset(dataset)
return df["value"][(df["variable"] == variable) & (df["unit"] == unit)].unique()
@lazy_member("_cache_list_datasets")
def list_datasets(self):
"""What datasets to we have"""
if self.data_format == "msg_pack":
return sorted(
[
name
for name in self.zf.namelist()
if not name.startswith("_")
and not os.path.basename(name).startswith("_")
]
)
elif self.data_format == "parquet":
return sorted(
[
name[: name.rfind("/")]
for name in self.zf.namelist()
if not name.startswith("_")
and not os.path.basename(name[: name.rfind("/")]).startswith("_")
and name.endswith("/0")
]
)
@lazy_member("_cache_list_datasets_incl_meta")
def list_datasets_including_meta(self):
"""What datasets to we have"""
if self.data_format == "msg_pack":
return sorted(self.zf.namelist())
elif self.data_format == "parquet":
import re
raw = self.zf.namelist()
without_numbers = [
x if not re.search("/[0-9]+$", x) else x[: x.rfind("/")] for x in raw
]
return sorted(set(without_numbers))
@lazy_member("_datasets_with_name_lookup")
def datasets_with_name_lookup(self):
return [ds for (ds, df) in self.iter_datasets() if "name" in df.columns]
def name_lookup(self, dataset, variable):
df = self.get_dataset(dataset)
# todo: optimize using where?
return df[df.variable == variable]["name"].iloc[0]
def variable_or_name_to_variable_and_unit(self, dataset, variable_or_name):
df = self.get_dataset(dataset)[["variable", "name", "unit"]]
rows = df[(df.variable == variable_or_name) | (df.name == variable_or_name)]
if len(rows["variable"].unique()) > 1:
raise ValueError(
"variable_or_name_to_variable led to multiple variables (%i): %s"
% (len(rows["variable"].unique()), rows["variable"].unique())
)
try:
r = rows.iloc[0]
except IndexError:
raise KeyError("Not found: %s" % variable_or_name)
return r["variable"], r["unit"]
def _get_dataset_columns_meta(self):
import json
with self.zf.open("_meta/_to_wide_columns") as op:
return json.loads(op.read().decode("utf-8"))
def has_wide(self, dataset):
if dataset.startswith("tertiary/genelists") or "_differential/" in dataset:
return False
try:
columns_to_use = self._get_dataset_columns_meta()
except KeyError:
return True
if dataset in columns_to_use and not columns_to_use[dataset]:
return False
return True
@lru_cache(maxsize=datasets_to_cache)
def get_wide(
self,
dataset,
apply_exclusion=True,
standardized=False,
filter_func=None,
column="value",
):
"""Return dataset in row=variable, column=patient format.
if @standardized is True Index is always (variable, unit) or (variable, unit, name),
and columns always (patient, [compartment, cell_type, disease])
Otherwise, unit and compartment will be left off if there is only a
single value for them in the dataset
if @apply_exclusion is True, excluded patients will be filtered from DataFrame
@filter_func is run on the dataset before converting to wide, it
takes a df, returns a modified df
"""
dataset = self.dataset_exists(dataset)
if not self.has_wide(dataset):
raise WideNotSupported()
df = self.get_dataset(dataset)
if filter_func:
df = filter_func(df)
index = ["variable"]
columns = self._get_wide_columns(dataset, df, standardized)
if standardized or len(df.unit.cat.categories) > 1:
index.append("unit")
if "name" in df.columns:
index.append("name")
# if 'somascan' in dataset:
# raise ValueError(dataset, df.columns, index ,columns)
dfw = self.to_wide(df, index, columns, column=column)
if apply_exclusion:
try:
return self.apply_exclusion(dataset, dfw)
except CantApplyExclusion:
return dfw
else:
return dfw
def _get_wide_columns(self, dataset, tall_df, standardized):
try:
columns_to_use = self._get_dataset_columns_meta()
except KeyError:
columns_to_use = {}
if dataset in columns_to_use:
columns = columns_to_use[dataset]
if standardized:
for x in known_compartment_columns:
if not x in columns:
columns.append(x)
if x in tall_df.columns and (
(
hasattr(tall_df[x], "cat")
and (len(tall_df[x].cat.categories) > 1)
)
or (len(tall_df[x].unique()) > 1)
):
pass
else:
if standardized and x not in tall_df.columns:
tall_df = tall_df.assign(**{x: np.nan})
else:
if "vid" in tall_df.columns and not "patient" in tall_df.columns:
columns = ["vid"]
elif "patient" in tall_df.columns:
columns = ["patient"]
else:
raise ValueError(
"Do not know how to convert this dataset to wide format."
" Retrieve it get_dataset() and call to_wide() manually with appropriate parameters."
)
for x in known_compartment_columns:
if x in tall_df.columns or (standardized and x != "compartment"):
if not x in columns:
columns.append(x)
if x in tall_df.columns and (
(
hasattr(tall_df[x], "cat")
and (len(tall_df[x].cat.categories) > 1)
)
or (len(tall_df[x].unique()) > 1)
):
pass
else:
if standardized and x not in tall_df.columns:
tall_df = tall_df.assign(**{x: np.nan})
elif not standardized:
if (
hasattr(tall_df[x], "cat")
and (len(tall_df[x].cat.categories) == 1)
) or (len(tall_df[x].unique()) == 1):
if x in columns:
columns.remove(x)
return columns
def to_wide(
self,
df,
index=["variable"],
columns=known_compartment_columns,
sort_on_first_level=False,
column='value',
):
"""Convert a dataset (or filtered dataset) to a wide DataFrame.
Preferred to pd.pivot_table manually because it is
a) faster and
b) avoids a bunch of pitfalls when working with categorical data and
c) makes sure the columns are dtype=float if they contain nothing but floats
index = variable,unit
columns = (patient, compartment, cell_type)
"""
if columns == known_compartment_columns:
columns = [x for x in columns if x in df.columns]
# raise ValueError(df.columns,index,columns)
chosen = [column] + index + columns
df = df.loc[:, [x for x in chosen if x in df.columns]]
for x in chosen:
if x not in df.columns:
df = df.assign(**{x: np.nan})
set_index_on = index + columns
columns_pos = tuple(range(len(index), len(index) + len(columns)))
res = df.set_index(set_index_on).unstack(columns_pos)
c = res.columns
c = c.droplevel(0)
# this removes categories from the levels of the index. Absolutly
# necessary, or you can't add columns later otherwise
if isinstance(c, pd.MultiIndex):
try:
c = pd.MultiIndex(
[list(x) for x in c.levels], codes=c.codes, names=c.names
)
except AttributeError:
c = pd.MultiIndex(
[list(x) for x in c.levels], labels=c.labels, names=c.names
)
else:
c = list(c)
res.columns = c
single_unit = not 'unit' in df.columns or len(df['unit'].unique()) == 1
if isinstance(c, list):
res.columns.names = columns
if sort_on_first_level:
# sort on first level - ie. patient, not compartment - slow though
res = res[sorted(list(res.columns))]
for c in res.columns:
x = res[c].fillna(value=np.nan, inplace=False)
if (x == None).any(): # noqa: E711
raise ValueError("here")
if single_unit: # don't do this for multiple units -> might have multiple dtypes
try:
res[c] = | pd.to_numeric(x, errors="raise") | pandas.to_numeric |
import shlex
import os
import sys
import subprocess
import json
import pprint
import numpy as np
import pandas as pd
APPEND = '0ms'
if len(sys.argv) == 3:
APPEND = sys.argv[2]
LOG_BASE_DIR = '../logs/'
LOG_DIR = f'{LOG_BASE_DIR}/kem_{APPEND}'
PKL_DIR = './pkl/kem'
def parse_algo(l):
split = l.split('_')
ts = split[1]
run = split[-2]
algo = '_'.join(split[4:-2]).split('.')[0]
return (algo, ts, run)
def parse_bench(line, algo, ts, run):
line = line.rstrip()[7:]
d = dict(token.split('=') for token in shlex.split(line))
d['algo'] = algo
d['ts'] = ts
d['run'] = run
return d
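# Illustrative input line (format inferred from the parsing above and the fields used
# in beautify_msg() below; the keys and values shown are assumptions, not from a real log):
#   Bench: type=tls_msg_cb_bench len=517 clock=0.000123 time=0.000130 n=3 sum_len=1551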
def parse_time(line, algo, ts, run):
s = line.rstrip().split(' ')
return {'run': run, 'ts': ts, 'type': s[0], 'algo': algo, 'clock': s[1]}
def __get_frame_info(frame, d):
d['time'] = frame['frame.time']
d['time_delta'] = frame['frame.time_delta']
d['frame_nr'] = frame['frame.number']
d['frame_len'] = frame['frame.len']
return d
def __get_udp_info(udp, d):
d['src'] = udp['udp.srcport']
d['dst'] = udp['udp.dstport']
return d
def __get_rad_info(radius, d):
d['rad_len'] = radius['radius.length']
d['rad_code'] = radius['radius.code']
d['rad_id'] = radius['radius.id']
return d
def __parse_tls_real_type(__d):
if 'tls.handshake.type' in __d:
__d['tls_real_type'] = __d['tls.handshake.type']
elif 'tls.record.opaque_type' in __d:
__d['tls_real_type'] = __d['tls.record.opaque_type']
else:
__d['tls_real_type'] = __d['tls.record.content_type']
return __d
def __parse_tls_record_fields(record, __d):
for field in record:
if field == 'tls.record.version':
__d['tls.record.version'] = record['tls.record.version']
elif field == 'tls.record.opaque_type':
__d['tls.record.content_type'] = record['tls.record.opaque_type']
elif field == 'tls.record.content_type':
__d['tls.record.content_type'] = record['tls.record.content_type']
elif field == 'tls.record.length':
__d['tls.record.length'] = record['tls.record.length']
elif field == 'tls.handshake':
if 'tls.handshake.type' in record[field]:
__d['tls.handshake.type'] = record[field]['tls.handshake.type']
if 'tls.handshake.length' in record[field]:
__d['tls.handshake.length'] = record[field]['tls.handshake.length']
else:
pass
return __parse_tls_real_type(__d)
def __parse_eap(eap, _d):
_d['eap.id'] = eap['eap.id']
_d['eap.code'] = eap['eap.code']
_d['eap.len'] = eap['eap.len']
if 'eap.type' in eap:
_d['eap.type'] = eap['eap.type']
return _d
def parse_cap(capfile, algo, ts, run):
cap = []
tshark = ('tshark', '-n', '-2', '-r', capfile, '-T', 'json', '--no-duplicate-keys')
o = subprocess.Popen(tshark, stdout=subprocess.PIPE)
packets = json.loads(o.communicate()[0])
pkt_count = 0
for _x, packet in enumerate(packets):
d = {'algo': algo, 'ts': ts, 'run': run}
packet = packet['_source']
d = __get_frame_info(packet['layers']['frame'], d)
if 'radius' not in packet['layers']:
continue
d['frame_count'] = pkt_count
pkt_count += 1
d = __get_udp_info(packet['layers']['udp'], d)
d = __get_rad_info(packet['layers']['radius'], d)
radius = packet['layers']['radius']
for avp_count, x in enumerate(radius['Attribute Value Pairs']['radius.avp_tree']):
has_tls_layer = False
_d = d.copy()
_d['avp_count'] = avp_count
for k in x:
if k == 'radius.avp.type':
_d['rad_avp_t'] = x['radius.avp.type']
elif k == 'radius.avp.length':
_d['rad_avp_len'] = x['radius.avp.length']
elif k == 'eap':
if _x == 0:
assert(x[k]['eap.code'] == '2' and x[k]['eap.type'] == '1')
if _x == len(packets)-1:
assert(x[k]['eap.code'] == '3')
_d = __parse_eap(x[k], _d)
if 'tls' in x[k]:
if not isinstance(x[k]['tls'],str):
for _k in x[k]['tls']:
if _k == 'tls.record':
records = x[k]['tls'][_k]
if isinstance(records, dict):
records = [records]
if len(records) > 0:
has_tls_layer = True
for i, record in enumerate(records):
__d = __parse_tls_record_fields(record, _d.copy())
__d['record_count'] = i
cap.append(__d)
elif _k == 'Ignored Unknown Record':
pass
else:
print(d['frame_nr'])
pprint.pprint(x[k])
if not has_tls_layer:
cap.append(_d)
return cap
def parse_inst(instfile, algo, ts, run):
log = open(instfile,'r')
bench = []
time = []
for line in log.readlines():
if line.startswith('Bench: '):
bench.append(parse_bench(line, algo, ts, run))
elif line.startswith('time_'):
time.append(parse_time(line, algo, ts, run))
else:
continue
log.close()
return bench, time
def beautify_msg(_msg_cb):
_msg_cb['len'] = _msg_cb['len'].astype('int64')
_msg_cb['clock'] = _msg_cb['clock'].astype(float)
_msg_cb['clock_delta'] = _msg_cb['clock_delta'].astype(float)
_msg_cb['clock_abs'] = _msg_cb['clock_abs'].astype(float)
_msg_cb['time'] = _msg_cb['time'].astype(float)
_msg_cb['time_delta'] = _msg_cb['time_delta'].astype(float)
_msg_cb['time_abs'] = _msg_cb['time_abs'].astype(float)
_msg_cb['sum_len'] = _msg_cb['sum_len'].astype(float)
_msg_cb['n'] = _msg_cb['n'].astype(int)
_msg_cb = _msg_cb.reset_index().drop(['index', 'type'], axis = 1)
return _msg_cb
def beautify_info(_info_cb):
_info_cb['clock'] = _info_cb['clock'].astype(float)
_info_cb['clock_delta'] = _info_cb['clock_delta'].astype(float)
_info_cb['clock_abs'] = _info_cb['clock_abs'].astype(float)
_info_cb['time'] = _info_cb['clock_delta'].astype(float)
_info_cb['time_delta'] = _info_cb['time_delta'].astype(float)
_info_cb['time_abs'] = _info_cb['time_abs'].astype(float)
_info_cb['n'] = _info_cb['n'].astype(float)
_info_cb = _info_cb.reset_index().drop(['index', 'type'], axis = 1)
return _info_cb
def beautify_time(_time_df):
_time_df['clock'] = _time_df['clock'].astype('float')
#_time_df['cpu_time'] = _time_df['cpu_time'].astype('float')
#_time_df['wct'] = _time_df['wct'].astype('float')
_df_total = _time_df[_time_df['type'] == 'time_total']
_df_eap = _time_df[_time_df['type'] == 'time_eap']
return _df_total, _df_eap
def beautify_cap(_cap_df):
_cap_df['frame_nr'] = _cap_df['frame_nr'].astype(int)
_cap_df['ts'] = _cap_df['ts'].astype(int)
_cap_df['run'] = _cap_df['run'].astype(int)
_cap_df['time'] = pd.to_datetime(_cap_df['time'])
_cap_df['time_delta'] = _cap_df['time_delta'].astype(float)
_cap_df['frame_len'] = _cap_df['frame_len'].astype(int)
_cap_df['rad_len'] = _cap_df['rad_len'].astype(int)
_cap_df['rad_avp_len'] = _cap_df['rad_avp_len'].astype(int)
_cap_df['eap.len'] = _cap_df['eap.len'].astype(float)
_cap_df['tls.record.length'] = _cap_df['tls.record.length'].astype(float)
_cap_df['tls.handshake.length'] = _cap_df['tls.handshake.length'].astype(float)
return _cap_df
def beautify(bench, time, cap):
_msg_cb = None
_info_cb = None
_df_total = None
_df_eap = None
_cap_df = None
bench_df = pd.DataFrame(bench)
if len(bench_df) > 0:
_msg_cb = bench_df[bench_df['type'] == 'tls_msg_cb_bench'].copy().dropna(axis='columns')
_msg_cb = beautify_msg(_msg_cb)
if len(bench_df) > 0:
_info_cb = bench_df[bench_df['type'] == 'tls_info_cb_bench'].copy().dropna(axis='columns')
_info_cb = beautify_info(_info_cb)
time_df = pd.DataFrame(time)
if len(time_df) > 0:
_df_total, _df_eap = beautify_time(time_df)
_cap_df = pd.DataFrame(cap)
if len(_cap_df) > 0:
_cap_df = beautify_cap(_cap_df)
return _msg_cb, _info_cb, _df_total, _df_eap, _cap_df
def _parse(_min=0, _max=None):
bench = []
time = []
cap = []
dirlist = os.listdir(LOG_DIR)
if _max is None:
_max=len(dirlist)
for i, l in enumerate(dirlist):
if i < _min or i > _max:
continue
print(f'Parsing log {i}/{len(dirlist)}: {l}')
algo, ts, run = parse_algo(l)
if l.endswith('_inst.log'):
instfile = f'{LOG_DIR}/{l}'
a = []
b = []
a, b = parse_inst(instfile, algo, ts, run)
bench += a
time += b
elif l.endswith('.cap'):
capfile = f'{LOG_DIR}/{l}'
cap += parse_cap(capfile, algo, ts, run)
else:
print(f"Error unknown log {l}")
sys.exit(1)
return beautify(bench, time, cap)
def main(load=None, store=None):
if load is not None:
_msg_cb = pd.read_pickle(f"{PKL_DIR}/msg_cb_{APPEND}.pkl")
_info_cb = pd.read_pickle(f"{PKL_DIR}/info_cb_{APPEND}.pkl")
_df_total = pd.read_pickle(f"{PKL_DIR}/df_total_{APPEND}.pkl")
_df_eap = | pd.read_pickle(f"{PKL_DIR}/df_eap_{APPEND}.pkl") | pandas.read_pickle |
# -*- coding: utf-8 -*-
"""
Tests that comments are properly handled during parsing
for all of the parsers defined in parsers.py
"""
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.compat import StringIO
class CommentTests(object):
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
# check with delim_whitespace=True
df = self.read_csv(StringIO(data.replace(',', ' ')), comment='#',
delim_whitespace=True)
tm.assert_almost_equal(df.values, expected)
# custom line terminator is not supported
# with the Python parser yet
if self.engine == 'c':
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data.replace('\n', '*')),
comment='#', lineterminator='*')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
# this should ignore the first four lines (including comments)
expected = [[1., 2., 4.], [5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
# header should begin at the second non-comment line
expected = [[1., 2., 4.], [5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
expected = [[1., 2., 4.], [5., np.nan, 10.]]
df = self.read_csv( | StringIO(data) | pandas.compat.StringIO |
import xml.etree.ElementTree as etree
import pandas as pd
import numpy as np
import os
# TODO: add to_pi_json() method. (Both PiTimeSeries and PiTimeSeriesCollection should be able to call this method)
# TODO: adapt to_pi_xml() and to_pi_json() from PiTimeSeries by Mattijn. Probably more robust write methods.
class PiBase:
"""
Mix-in class for functionality that applies to both PiTimeSeries and PiTimeSeriesCollection.
"""
def to_pi_json(self, fnam):
#TODO: write to_pi_json function.
raise NotImplementedError()
def to_pi_xml(self, fnam):
"""
Write PiTimeSeries object to PI-XML file.
Parameters
----------
fnam: path
path to XML file to be written
TODO: allow events (timeseries lines) to accept other fields besides 'date', 'time', 'value', 'flag'
"""
assert fnam.endswith(".xml"), "Output file should have '.xml' extension!"
# first line of XML file
line0 = '<?xml version="1.0" encoding="UTF-8"?>\n'
# some definitions for timeseries XML file
NS = r"http://www.wldelft.nl/fews/PI"
FS = r"http://www.wldelft.nl/fews/fs"
XSI = r"http://www.w3.org/2001/XMLSchema-instance"
schemaLocation = r"http://fews.wldelft.nl/schemas/version1.0/pi-schemas/pi_timeseries.xsd"
timeseriesline = '<TimeSeries xmlns="{NS}" xmlns:xsi="{XSI}" xsi:schemaLocation="{NS} {schema}" version="{version}" xmlns:fs="{FS}">\n'
# line templates
paramline = "<{tag}>{param}</{tag}>\n"
# write file
with open(fnam, "w") as f:
f.write(line0)
f.write(timeseriesline.format(NS=NS, FS=FS, XSI=XSI, schema=schemaLocation, version=self.version))
tzline = "\t" + paramline.format(tag="timeZone", param=self.timezone)
f.write(tzline)
# how best to do this? Needs to be generic for single series vs collection of series
N = 1 if isinstance(self, FewsTimeSeries) else self.timeseries.shape[0]
for i in range(N):
if isinstance(self, FewsTimeSeries):
ts = self
elif isinstance(self, FewsTimeSeriesCollection):
ts = self.timeseries["events"].iloc[i]
# start series
start = "\t" + "<series>\n"
f.write(start)
# write header
hlines = []
hstart = 2*"\t" + "<header>\n"
hlines.append(hstart)
for htag, hval in ts.header.items():
if htag.endswith("Date"):
try:
hdate = hval.strftime("%Y-%m-%d")
htime = hval.strftime("%H:%M:%S")
except AttributeError:
ts._update_header_dates()
hdate, htime = ts.header[htag].split(" ")
hline = '<{tag} date="{date}" time="{time}"/>\n'.format(tag=htag,
date=hdate,
time=htime)
elif htag.endswith("timeStep"):
hline = '<{tag} unit="{unit}"/>\n'.format(tag=htag, unit=hval)
else:
hline = paramline.format(tag=htag, param=hval)
hlines.append(3*"\t" + hline)
hlines.append(2*"\t" + "</header>\n")
f.writelines(hlines)
# write timeseries
dates = ts.timeseries.reset_index()["index"].apply(lambda s: | pd.datetime.strftime(s, "%Y-%m-%d") | pandas.datetime.strftime |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2020 azai/Rgveda/GolemQuant
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import datetime
import time
import numpy as np
import pandas as pd
import pymongo
try:
import QUANTAXIS as QA
from QUANTAXIS.QAUtil import (QASETTING,
DATABASE,
QA_util_log_info,
QA_util_to_json_from_pandas,
QA_util_date_valid,
QA_util_date_stamp,)
from QUANTAXIS.QAUtil.QAParameter import ORDER_DIRECTION
from QUANTAXIS.QAData.QADataStruct import (QA_DataStruct_Index_min,
QA_DataStruct_Index_day,
QA_DataStruct_Stock_day,
QA_DataStruct_Stock_min)
from QUANTAXIS.QAUtil.QADate_Adv import (
QA_util_timestamp_to_str,
QA_util_datetime_to_Unix_timestamp,
QA_util_print_timestamp
)
except:
print('PLEASE run "pip install QUANTAXIS" to call these modules')
pass
try:
from GolemQ.GQUtil.parameter import (
AKA,
INDICATOR_FIELD as FLD,
TREND_STATUS as ST,
)
except:
class AKA():
"""
Trend-state constants and dedicated indicator names; defining them as constants avoids typos caused by writing raw strings directly.
"""
# Candlestick (OHLCV) fields
CODE = 'code'
NAME = 'name'
OPEN = 'open'
HIGH = 'high'
LOW = 'low'
CLOSE = 'close'
VOLUME = 'volume'
VOL = 'vol'
DATETIME = 'datetime'
LAST_CLOSE = 'last_close'
PRICE = 'price'
SYSTEM_NAME = 'myQuant'
def __setattr__(self, name, value):
raise Exception(u'Const class does not allow changing property values.')
return super().__setattr__(name, value)
class ST():
"""
Trend-state constants and dedicated indicator names; defining them as constants avoids typos caused by writing raw strings directly.
"""
# Trend states
POSITION_R5 = 'POS_R5'
TRIGGER_R5 = 'TRG_R5'
CANDIDATE = 'CANDIDATE'
def __setattr__(self, name, value):
raise Exception(u'Const class does not allow changing property values.')
return super().__setattr__(name, value)
class FLD():
DATETIME = 'datetime'
ML_FLU_TREND = 'ML_FLU_TREND'
FLU_POSITIVE = 'FLU_POSITIVE'
FLU_NEGATIVE = 'FLU_NEGATIVE'
def __setattr__(self, name, value):
raise Exception(u'Const class does not allow changing property values.')
return super().__setattr__(name, value)
def GQSignal_util_save_indices_day(code,
indices,
market_type=QA.MARKET_TYPE.STOCK_CN,
portfolio='myportfolio',
ui_log=None,
ui_progress=None):
"""
Save all computed daily stock indicators into the database, used for aggregated evaluation and screening -- daily bars
save stock_indices, state
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
def _check_index(coll_indices):
coll_indices.create_index([("code",
pymongo.ASCENDING),
(FLD.DATETIME,
pymongo.ASCENDING),],
unique=True)
coll_indices.create_index([("date",
pymongo.ASCENDING),
(ST.TRIGGER_R5,
pymongo.ASCENDING),],)
coll_indices.create_index([("date",
pymongo.ASCENDING),
(ST.POSITION_R5,
pymongo.ASCENDING),],)
coll_indices.create_index([('date_stamp',
pymongo.ASCENDING),
(ST.TRIGGER_R5,
pymongo.ASCENDING),],)
coll_indices.create_index([('date_stamp',
pymongo.ASCENDING),
(ST.POSITION_R5,
pymongo.ASCENDING),],)
coll_indices.create_index([("date",
pymongo.ASCENDING),
(FLD.FLU_POSITIVE,
pymongo.ASCENDING),],)
coll_indices.create_index([('date_stamp',
pymongo.ASCENDING),
(FLD.FLU_POSITIVE,
pymongo.ASCENDING),],)
coll_indices.create_index([("code",
pymongo.ASCENDING),
('date_stamp',
pymongo.ASCENDING),],
unique=True)
coll_indices.create_index([("code",
pymongo.ASCENDING),
("date",
pymongo.ASCENDING),],
unique=True)
coll_indices.create_index([("code",
pymongo.ASCENDING),
(FLD.DATETIME,
pymongo.ASCENDING),
(ST.CANDIDATE,
pymongo.ASCENDING),],
unique=True)
coll_indices.create_index([("code",
pymongo.ASCENDING),
('date_stamp',
pymongo.ASCENDING),
(ST.CANDIDATE,
pymongo.ASCENDING),],
unique=True)
coll_indices.create_index([("code",
pymongo.ASCENDING),
("date",
pymongo.ASCENDING),
(ST.CANDIDATE,
pymongo.ASCENDING),],
unique=True)
def _formatter_data(indices):
frame = indices.reset_index(1, drop=False)
# Convert UTC time to Beijing time
frame['date'] = pd.to_datetime(frame.index,).tz_localize('Asia/Shanghai')
frame['date'] = frame['date'].dt.strftime('%Y-%m-%d')
frame['datetime'] = pd.to_datetime(frame.index,).tz_localize('Asia/Shanghai')
frame['datetime'] = frame['datetime'].dt.strftime('%Y-%m-%d %H:%M:%S')
# Convert the GMT+0 string to a UTC timestamp
frame['date_stamp'] = pd.to_datetime(frame['date']).astype(np.int64) // 10 ** 9
frame['created_at'] = int(time.mktime(datetime.datetime.now().utctimetuple()))
frame = frame.tail(len(frame) - 150)
return frame
client = QASETTING.client[AKA.SYSTEM_NAME]
# Write both the wide and the long table at the same time to reduce query friction
#coll_day = client.get_collection(
# 'indices_{}'.format(datetime.date.today()))
try:
if (market_type == QA.MARKET_TYPE.STOCK_CN):
#coll_indices = client.stock_cn_indices_day
coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.INDEX_CN):
#coll_indices = client.index_cn_indices_day
coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUND_CN):
#coll_indices = client.fund_cn_indices_day
coll_indices = client.get_collection('fund_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUTURE_CN):
#coll_indices = client.future_cn_indices_day
coll_indices = client.get_collection('future_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY):
#coll_indices = client.cryptocurrency_indices_day
coll_indices = client.get_collection('cryptocurrency_indices_{}'.format(portfolio))
else:
QA_util_log_info('WTF IS THIS! {} \n '.format(market_type), ui_log=ui_log)
return False
except Exception as e:
QA_util_log_info(e)
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
_check_index(coll_indices)
data = _formatter_data(indices)
err = []
# Check whether these are new ticks
query_id = {
"code": code,
'date_stamp': {
'$in': data['date_stamp'].tolist()
}
}
refcount = coll_indices.count_documents(query_id)
if refcount > 0:
if (len(data) > 1):
# Remove duplicated records
coll_indices.delete_many(query_id)
data = QA_util_to_json_from_pandas(data)
coll_indices.insert_many(data)
else:
# Continuous-update mode: update the single record
data.drop('created_at', axis=1, inplace=True)
data = QA_util_to_json_from_pandas(data)
coll_indices.replace_one(query_id, data[0])
else:
# New ticks: insert the records
data = QA_util_to_json_from_pandas(data)
coll_indices.insert_many(data)
return True
def GQSignal_util_save_indices_min(code,
indices,
frequence,
market_type=QA.MARKET_TYPE.STOCK_CN,
portfolio='myportfolio',
ui_log=None,
ui_progress=None):
"""
Save all computed indicator data into the database, used for aggregated evaluation and screening -- minute bars
save stock_indices, state
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
def _check_index(coll_indices):
coll_indices.create_index([("code",
pymongo.ASCENDING),
("type",
pymongo.ASCENDING),
(FLD.DATETIME,
pymongo.ASCENDING),],
unique=True)
coll_indices.create_index([("code",
pymongo.ASCENDING),
("type",
pymongo.ASCENDING),
("time_stamp",
pymongo.ASCENDING),],
unique=True)
coll_indices.create_index([(FLD.DATETIME,
pymongo.ASCENDING),
("type",
pymongo.ASCENDING),
(ST.TRIGGER_R5,
pymongo.ASCENDING),],)
coll_indices.create_index([(FLD.DATETIME,
pymongo.ASCENDING),
("type",
pymongo.ASCENDING),
(ST.POSITION_R5,
pymongo.ASCENDING),],)
coll_indices.create_index([("type",
pymongo.ASCENDING),
("time_stamp",
pymongo.ASCENDING),
(ST.TRIGGER_R5,
pymongo.ASCENDING),],)
coll_indices.create_index([("type",
pymongo.ASCENDING),
("time_stamp",
pymongo.ASCENDING),
(ST.POSITION_R5,
pymongo.ASCENDING),],)
coll_indices.create_index([(FLD.DATETIME,
pymongo.ASCENDING),
("type",
pymongo.ASCENDING),
(FLD.FLU_POSITIVE,
pymongo.ASCENDING),],)
coll_indices.create_index([("type",
pymongo.ASCENDING),
("time_stamp",
pymongo.ASCENDING),
(FLD.FLU_POSITIVE,
pymongo.ASCENDING),],)
coll_indices.create_index([("code",
pymongo.ASCENDING),
("type",
pymongo.ASCENDING),
(FLD.DATETIME,
pymongo.ASCENDING),
(ST.CANDIDATE,
pymongo.ASCENDING),],
unique=True)
coll_indices.create_index([("code",
pymongo.ASCENDING),
("type",
pymongo.ASCENDING),
("time_stamp",
pymongo.ASCENDING),
(ST.CANDIDATE,
pymongo.ASCENDING),],
unique=True)
def _formatter_data(indices, frequence):
frame = indices.reset_index(1, drop=False)
# Convert UTC time to Beijing time
frame['date'] = pd.to_datetime(frame.index,).tz_localize('Asia/Shanghai')
frame['date'] = frame['date'].dt.strftime('%Y-%m-%d')
frame['datetime'] = pd.to_datetime(frame.index,).tz_localize('Asia/Shanghai')
frame['datetime'] = frame['datetime'].dt.strftime('%Y-%m-%d %H:%M:%S')
# Convert the GMT+0 string to a UTC timestamp
frame['time_stamp'] = pd.to_datetime(frame['datetime']).astype(np.int64) // 10 ** 9
frame['type'] = frequence
frame['created_at'] = int(time.mktime(datetime.datetime.now().utctimetuple()))
frame = frame.tail(len(frame) - 150)
return frame
client = QASETTING.client[AKA.SYSTEM_NAME]
# Write both the wide and the long table at the same time to reduce query friction
#coll_day = client.get_collection(
# 'indices_{}'.format(datetime.date.today()))
try:
if (market_type == QA.MARKET_TYPE.STOCK_CN):
#coll_indices = client.stock_cn_indices_min
coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.INDEX_CN):
#coll_indices = client.index_cn_indices_min
coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUND_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('fund_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUTURE_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('future_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY):
#coll_indices = client.cryptocurrency_indices_min
coll_indices = client.get_collection('cryptocurrency_indices_{}'.format(portfolio))
else:
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
except Exception as e:
QA_util_log_info(e)
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
_check_index(coll_indices)
data = _formatter_data(indices, frequence)
err = []
# Check whether these are new ticks
query_id = {
"code": code,
'type': frequence,
"time_stamp": {
'$in': data['time_stamp'].tolist()
}
}
refcount = coll_indices.count_documents(query_id)
if refcount > 0:
if (len(data) > 1):
# Remove duplicated records
coll_indices.delete_many(query_id)
data = QA_util_to_json_from_pandas(data)
coll_indices.insert_many(data)
else:
# Continuous-update mode: replace the single existing record
data.drop('created_at', axis=1, inplace=True)
data = QA_util_to_json_from_pandas(data)
coll_indices.replace_one(query_id, data[0])
else:
# New ticks: insert the records
data = QA_util_to_json_from_pandas(data)
coll_indices.insert_many(data)
return True
def GQSignal_fetch_position_singal_day(start,
end,
frequence='day',
market_type=QA.MARKET_TYPE.STOCK_CN,
portfolio='myportfolio',
format='numpy',
ui_log=None,
ui_progress=None):
"""
'Fetch daily stock indicator data'
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
start = str(start)[0:10]
end = str(end)[0:10]
#code= [code] if isinstance(code,str) else code
client = QASETTING.client[AKA.SYSTEM_NAME]
# Write to both the wide and the long table to simplify later queries
#coll_day = client.get_collection(
# 'indices_{}'.format(datetime.date.today()))
try:
if (market_type == QA.MARKET_TYPE.STOCK_CN):
#coll_indices = client.stock_cn_indices_min
coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.INDEX_CN):
#coll_indices = client.index_cn_indices_min
coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUND_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('fund_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUTURE_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('future_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY):
#coll_indices = client.cryptocurrency_indices_min
coll_indices = client.get_collection('cryptocurrency_indices_{}'.format(portfolio))
else:
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
except Exception as e:
QA_util_log_info(e)
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
if QA_util_date_valid(end):
cursor = coll_indices.find({
ST.TRIGGER_R5: {
'$gt': 0
},
"date_stamp":
{
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)
}
},
{"_id": 0},
batch_size=10000)
#res=[QA_util_dict_remove_key(data, '_id') for data in cursor]
res = pd.DataFrame([item for item in cursor])
try:
res = res.assign(date=pd.to_datetime(res.date)).drop_duplicates((['date',
'code'])).set_index(['date',
'code'],
drop=False)
codelist = QA.QA_fetch_stock_name(res[AKA.CODE].tolist())
res['name'] = res.apply(lambda x:codelist.at[x.get(AKA.CODE), 'name'], axis=1)
except:
res = None
if format in ['P', 'p', 'pandas', 'pd']:
return res
elif format in ['json', 'dict']:
return QA_util_to_json_from_pandas(res)
# other supported output data formats
elif format in ['n', 'N', 'numpy']:
return numpy.asarray(res)
elif format in ['list', 'l', 'L']:
return numpy.asarray(res).tolist()
else:
print("QA Error GQSignal_fetch_position_singal_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
return None
else:
QA_util_log_info('QA Error GQSignal_fetch_position_singal_day data parameter start=%s end=%s is not right' % (start,
end))
def GQSignal_fetch_singal_day(code,
start,
end,
frequence='day',
market_type=QA.MARKET_TYPE.STOCK_CN,
portfolio='myportfolio',
format='numpy',
ui_log=None,
ui_progress=None):
"""
Fetch daily stock indicator / strategy signal data
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
start = str(start)[0:10]
end = str(end)[0:10]
#code= [code] if isinstance(code,str) else code
client = QASETTING.client[AKA.SYSTEM_NAME]
# Write to both the wide and the long table to simplify later queries
#coll_day = client.get_collection(
# 'indices_{}'.format(datetime.date.today()))
try:
if (market_type == QA.MARKET_TYPE.STOCK_CN):
#coll_indices = client.stock_cn_indices_min
coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.INDEX_CN):
#coll_indices = client.index_cn_indices_min
coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUND_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('fund_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUTURE_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('future_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY):
#coll_indices = client.cryptocurrency_indices_min
coll_indices = client.get_collection('cryptocurrency_indices_{}'.format(portfolio))
else:
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
except Exception as e:
QA_util_log_info(e)
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
# code checking
code = QA_util_code_tolist(code)
if QA_util_date_valid(end):
cursor = coll_indices.find({
'code': {
'$in': code
},
"date_stamp":
{
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)
}
},
{"_id": 0},
batch_size=10000)
#res=[QA_util_dict_remove_key(data, '_id') for data in cursor]
res = pd.DataFrame([item for item in cursor])
try:
res = res.assign(date= | pd.to_datetime(res.date) | pandas.to_datetime |
###############################################################################
# Summarize OSM roads lengths
# <NAME>, July 2018
# Purpose: summarize road lengths within features in defined shapefile
###############################################################################
import os, sys, time, subprocess, argparse, logging
import GOSTnets as gn
#from . import OSMNX_POIs
import geopandas as gpd
import osmnx as ox
import pandas as pd
import networkx as nx
from shapely.geometry import box
#import misc
# Highway features are reclassified to 4 OSMLR classes for simplification and standardization
# https://mapzen.com/blog/osmlr-2nd-technical-preview/
OSMLR_Classes = {
"motorway":"OSMLR level 1",
"motorway_link":"OSMLR level 1",
"trunk":"OSMLR level 1",
"trunk_link":"OSMLR level 1",
"primary":"OSMLR level 1",
"primary_link":"OSMLR level 1",
"secondary":"OSMLR level 2",
"secondary_link":"OSMLR level 2",
"tertiary":"OSMLR level 2",
"tertiary_link":"OSMLR level 2",
"unclassified":"OSMLR level 3",
"unclassified_link": "OSMLR level 3",
"residential": "OSMLR level 3",
"residential_link": "OSMLR level 3",
"track": "OSMLR level 4",
"service": "OSMLR level 4"
}
class osmExtraction(object):
'''
Download and installation instructions and basic usage can be found here
https://wiki.openstreetmap.org/wiki/Osmosis
'''
def __init__(self, osmosisCmd = r"C:\WBG\Anaconda\Osmosis\bin\osmosis", tempFile = r"C:\WBG\Anaconda\Osmosis\tempExecution.bat"):
'''
osmosisCmd is the path to the file osmosis in the bin folder of the downloaded
stable version of osmosis (link above)
'''
self.osmosisCommand = osmosisCmd
self.tempFile = tempFile
def extractAmmenities(self, inPbf, outFile, amenityList=["amenity.school"], bounds=[], execute=False):
''' Read input osmpbf, extract all buildings and spit out to shapefile
INPUT
inPbf [string] - path to input pbf
outFile [string] - path to output shapefile
OPTIONAL
amenity [string] - definition of amenity to extract - defaults to schools
bounds [list of numbers] - bounds of the area to extract; if not defined, covers the entire input area
'''
if len(amenityList) > 1:
amenity = ",".join(amenityList)
else:
amenity = amenityList[0]
baseCommand = r'{osmCommand} --read-pbf {inPbf} --nkv keyValueList="{amenity}"'.format(
osmCommand = self.osmosisCommand,
inPbf = inPbf,
amenity=amenity)
if len(bounds) > 0:
baseCommand = "{baseCommand} --bounding-box top={top} left={left} bottom={bottom} right={right}".format(
baseCommand=baseCommand,
top = bounds[3],
left = bounds[0],
bottom = bounds[1],
right = bounds[2])
baseCommand = '{baseCommand} --write-pbf {outPbf}'.format(
baseCommand = baseCommand,
outPbf = outFile
)
if not execute:
return(baseCommand)
else:
with open(self.tempFile, 'w') as outFile:
outFile.write(baseCommand)
subprocess.call([self.tempFile], shell=True)
def extractBuildings(self, inPbf, outFile, bounds=[], execute=True):
''' Read input osmpbf, extract all buildings and spit out to shapefile
INPUT
inPbf [string] - path to input pbf
outFile [string] - path to output shapefile
'''
baseCommand = r"{osmCommand} --read-pbf {inPbf} --tf accept-ways building=*".format(
osmCommand = self.osmosisCommand,
inPbf = inPbf,
outPbf = outFile)
if len(bounds) > 0:
baseCommand = "{baseCommand} --bounding-box top={top} left={left} bottom={bottom} right={right}".format(
baseCommand=baseCommand,
top = bounds[3],
left = bounds[0],
bottom = bounds[1],
right = bounds[2])
baseCommand = "{baseCommand} --write-pbf {outPbf}".format(
baseCommand = baseCommand,
outPbf = outFile)
if not execute:
return(baseCommand)
else:
with open(self.tempFile, 'w') as outFile:
outFile.write(baseCommand)
subprocess.call([self.tempFile], shell=True)
def extractBoundingBox(self, inPbf, inShp, outPbf, execute=True):
''' Extract the inPbf based on the bounding box of the input shapefile
INPUT
inPbf [string] - path to input pbf
inShp [string or geopandas object] - path to aoi shapefile
'''
if type(inShp) == str:
inD = gpd.read_file(inShp)
elif type(inShp) == gpd.GeoDataFrame:
inD = inShp
else:
raise(ValueError("inShp needs to be a string or a geopandas object"))
if inD.crs != {'init':'epsg:4326'}:
inD = inD.to_crs({'init':'epsg:4326'})
baseCommand = r"{osmCommand} --read-pbf {inPbf} --bounding-box top={top} left={left} bottom={bottom} right={right} --write-pbf {outPbf}".format(
osmCommand = self.osmosisCommand,
inPbf = inPbf,
top = float(inD.bounds.maxy),
left = float(inD.bounds.minx),
bottom = float(inD.bounds.miny),
right = float(inD.bounds.maxx),
outPbf = outPbf)
if not execute:
return(baseCommand)
else:
with open(self.tempFile, 'w') as outFile:
outFile.write(baseCommand)
subprocess.call([self.tempFile], shell=True)
def extractHighways(self, inPbf, outOSM, values=[1,2,3,4], bounds = [], execute=True):
''' Extract highways (all roads) from input osm pbf. Can limit definition of which roads to extract
by defining the OSMLR class. see osmMisc.OSMLR_Classes for definition of classes
INPUT
inPbf [string] - path to input osm pbf file
outOSM [string] - path to output osm pbf file
values [list of int] [optional][default = [1,2,3,4] - OSMLR values to extract
bounds [list of coordinates] - boundary to extract
execute [boolean] [default = True] - if set to false, command will return the osmosis command, and not execute it
'''
highwayVals = []
for key, value in OSMLR_Classes.items():
try:
if int(value.split(" ")[2]) in values:
#highwayVals.append("highway=%s" % key)
highwayVals.append(key)
except:
pass
allCommands = ",".join(highwayVals)
baseCommand = r"{osmCmd} --read-pbf {inPbf} --tf accept-ways highway={highwayCommand} --used-node".format(
osmCmd = self.osmosisCommand,
inPbf=inPbf,
highwayCommand=allCommands,
outPbf=outOSM)
if len(bounds) > 0:
baseCommand = "{baseCommand} --bounding-box top={top} left={left} bottom={bottom} right={right}".format(
baseCommand=baseCommand,
top = bounds[3],
left = bounds[0],
bottom = bounds[1],
right = bounds[2])
baseCommand = "{baseCommand} --write-pbf {outPbf}".format(
baseCommand=baseCommand,
outPbf=outOSM)
if not execute:
return(baseCommand)
else:
with open(self.tempFile, 'w') as outFile:
outFile.write(baseCommand)
subprocess.call([self.tempFile], shell=True)
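# Illustrative usage sketch for the osmExtraction wrapper above; the paths below are
# hypothetical and the calls are kept commented out so importing this module has no side effects.
#extractor = osmExtraction(osmosisCmd=r"C:\Osmosis\bin\osmosis", tempFile=r"C:\Osmosis\tempExecution.bat")
#extractor.extractBoundingBox(inPbf="country.osm.pbf", inShp="aoi.shp", outPbf="aoi.osm.pbf", execute=True)
#extractor.extractHighways(inPbf="aoi.osm.pbf", outOSM="aoi_roads.osm.pbf", values=[1, 2, 3], execute=True)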
#grid = gpd.read_file(r"Q:\AFRICA\COD\Projects\Shohei_Poverty_Kinshasa\ADMIN\PSUs\bati_ilot_quartier.shp")
#outFolder = r"Q:\AFRICA\COD\Projects\Shohei_Poverty_Kinshasa\ADMIN"
def downloadBaseData(grid, outFolder, amenities=True):
'''Download OSM based data using OSMNX - roads, schools, hospitals, and churches.
INPUT
grid [geopandas dataFrame] - area to download in
outFolder [string] - place to write output
RETURNS
dictionary of [geopandas]
'''
toReturn = {}
roadsFile = os.path.join(outFolder, "OSM_Roads.shp")
if not os.path.exists(roadsFile):
bbox = box(grid.bounds.minx.min(), grid.bounds.miny.min(), grid.bounds.maxx.max(), grid.bounds.maxy.max())
#Download road network
G = ox.graph_from_polygon(bbox, network_type='drive_service')
roads = gn.edge_gdf_from_graph(G)
roads['highway'] = roads.highway.astype(str)
roads['OSMLR'] = roads.highway.map(OSMLR_Classes)
roads['oneway'] = roads.oneway.astype(int)
'''
for badKeys in ['access', 'bridge','junction', 'lanes','oneway', 'osmid', 'ref', 'service','tunnel','width','stnode','endnode','name']:
try:
roads = roads.drop([badKeys],axis=1)
except:
print("Could not drop %s" % badKeys)
'''
try:
roads.to_file(roadsFile)
except:
print("Could not write output")
else:
roads = gpd.read_file(roadsFile)
# bbox is also needed below for the amenity downloads, so recompute it when the cached roads file is reused
bbox = box(grid.bounds.minx.min(), grid.bounds.miny.min(), grid.bounds.maxx.max(), grid.bounds.maxy.max())
toReturn['Roads'] = roads
if amenities:
#Download health amenities
schools = OSMNX_POIs.AmenityObject('Health', bbox, ['clinic','pharmacy','hospital','health'], "C:/Temp")
schools = schools.GenerateOSMPOIs()
schools = schools.RemoveDupes(0.005, roads.crs)
schools = schools.prepForMA()
schools.to_csv(os.path.join(outFolder, "OSM_Health.csv"), encoding='utf-8')
toReturn['Schools'] = schools
#Download education amenities
health = OSMNX_POIs.AmenityObject('Education', bbox, ['school','university','secondary school', 'kindergarten', 'college'], "C:/Temp")
health = health.GenerateOSMPOIs()
health = health.RemoveDupes(0.005, roads.crs)
health = health.prepForMA()
health.to_csv(os.path.join(outFolder, "OSM_Schools.csv"), encoding='utf-8')
toReturn['Health'] = health
#Download Churches
placeOfWorship = OSMNX_POIs.AmenityObject('Churches', bbox, ['place_of_worship'], "C:/Temp")
placeOfWorship = placeOfWorship.GenerateOSMPOIs()
placeOfWorship = placeOfWorship.RemoveDupes(0.005, roads.crs)
placeOfWorship = placeOfWorship.prepForMA()
placeOfWorship.to_csv(os.path.join(outFolder, "OSM_Churches.csv"), encoding='utf-8')
toReturn['placeOfWorship'] = placeOfWorship
return(toReturn)
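# Hedged usage sketch for downloadBaseData (hypothetical paths, commented out to avoid network
# calls at import time); amenities=False sidesteps the optional OSMNX_POIs dependency.
#aoi_grid = gpd.read_file(r"C:\Temp\admin_grid.shp")
#osm_layers = downloadBaseData(aoi_grid, r"C:\Temp\osm_out", amenities=False)
#roads_gdf = osm_layers['Roads']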
def summarizeOSM(grid, verbose=True, roadsOnly=False):
''' Summarizes OSM road length within each feature in the input grid
---variables---
grid [GeoDataframe] - each feature will have the length of all roads summarized
--- To Do ---
1. The length projection is web mercator - this is not great, and should instead be projected to UTM
'''
WGS_84 = {'init' :'epsg:4326'}
WEB_MERCATOR = {'init' :'epsg:3857'}
#Extract OSM within the bounding box of the input grid
if grid.crs != WGS_84:
grid = grid.to_crs(WGS_84)
bbox = box(grid.bounds.minx.min(), grid.bounds.miny.min(), grid.bounds.maxx.max(), grid.bounds.maxy.max())
G = ox.graph_from_polygon(bbox, network_type='drive_service')
#Limit the OSM grid to important columns, ignore nodes
nodes,roads = ox.save_load.graph_to_gdfs(G, edges = True)
if roadsOnly:
return(roads)
roads = roads[['geometry','highway','osmid']]
roads['length'] = roads.length
roads['highway'] = roads.highway.astype(str)
roads['OSMLR'] = roads.highway.map(OSMLR_Classes)
FID_list, OSMLR1_list, OSMLR2_list, OSMLR3_list, OSMLRt_list = [], [], [], [], []
cnt = 0
verbose = False
#Loop through all of the input features to summarize OSM
for idx, obj in grid.iterrows():
if idx % 50 == 0 and verbose:
print ("%s of %s" % (cnt, grid.shape[0]))
roads2 = roads.copy()
#Find roads that intersect current geometry
roads2 = roads2[roads2.intersects(obj.geometry)]
#Find the intersection of the current geometry and the intersecting roads
roads2['intersecting'] = roads2.geometry.intersection(obj.geometry)
roads2 = roads2.set_geometry('intersecting')
roads2['intersecting_length'] = roads2.length
#Project the roads to a metre-based CRS
roads2 = roads2.to_crs(WEB_MERCATOR)
roads2['intersecting_length'] = roads2.length
FID_list.append(idx)
#Summarize total length of OSMLR classes
OSMLR1_list.append(roads2.loc[roads2.OSMLR == 'OSMLR level 1'].intersecting_length.sum())
OSMLR2_list.append(roads2.loc[roads2.OSMLR == 'OSMLR level 2'].intersecting_length.sum())
OSMLR3_list.append(roads2.loc[roads2.OSMLR == 'OSMLR level 3'].intersecting_length.sum())
OSMLRt_list.append(roads2.loc[roads2.OSMLR == 'OSMLR level 4'].intersecting_length.sum())
cnt = cnt + 1
results = pd.DataFrame({'FID':FID_list,
'OSMLR level 1': OSMLR1_list,
'OSMLR level 2': OSMLR2_list,
'OSMLR level 3': OSMLR3_list,
'OSMLR track': OSMLRt_list})
results['totalOSM'] = results['OSMLR level 1'] + results['OSMLR level 2'] + results['OSMLR level 3'] + results['OSMLR track']
return results
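# Hedged usage sketch for summarizeOSM (hypothetical path): join the per-feature road lengths
# back onto the input grid. Commented out to avoid triggering an OSM download at import time.
#aoi_grid = gpd.read_file(r"C:\Temp\admin_grid.shp")
#road_lengths = summarizeOSM(aoi_grid)
#aoi_grid = aoi_grid.join(road_lengths.set_index('FID'))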
def convertOSMPBF_DataFrame(inOSM, layer):
''' Convert an input OSM PBF to a geopandas dataframe
INPUT
inOSM [string] - path to OSM pbf to convert
layer [string] - data layer to extract. Select from lines, points, multipolygons, multilinestrings
RETURNS
[geopandas data frame]
'''
import ogr, geojson, json
from shapely.geometry import shape
driver=ogr.GetDriverByName('OSM')
data = driver.Open(inOSM)
layer = data.GetLayer(layer)
features=[x for x in layer]
def loadOSMjson(x):
''' convert the OSM JSON to something consumable as geopandas
'''
geom = shape(geojson.loads(json.dumps(x['geometry'])))
x['geometry'] = geom
for key, value in x['properties'].items():
x[key] = value
try:
splitTags = x['other_tags'].split(",")
for tag in splitTags:
tSplit = tag.split("=>")
x[tSplit[0].replace('"', '')] = tSplit[1].replace('"', '')
except:
pass
x.pop("properties", None)
x.pop("other_tags", None)
return(x)
allFeats = [loadOSMjson(feat.ExportToJson(as_object=True)) for feat in features]
inR = | pd.DataFrame(allFeats) | pandas.DataFrame |
# coding:utf-8
import os
from pathlib import Path
import sys
import argparse
import pdb
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tqdm import tqdm
import pickle
import time
from datetime import datetime, timedelta
from sklearn.metrics import confusion_matrix
from functools import partial
import scipy as sp
import matplotlib.pyplot as plt
#from matplotlib_venn import venn2
import lightgbm as lgb
from sklearn import preprocessing
import seaborn as sns
import gc
import psutil
import os
from IPython.display import FileLink
import statistics
import json
import ast
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_validate
import collections
import random
import functools
from sklearn.metrics import roc_curve,auc,accuracy_score,confusion_matrix,f1_score,classification_report
from sklearn.metrics import mean_squared_error
# The metric in question
from sklearn.metrics import cohen_kappa_score
import copy
from sklearn.model_selection import StratifiedKFold, KFold, train_test_split
import itertools
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from distutils.util import strtobool
import math
from scipy.sparse import csr_matrix, save_npz, load_npz
from typing import Union
from sklearn.decomposition import PCA
#import dask.dataframe as dd
import re
from sklearn.cluster import KMeans
from contextlib import contextmanager
from collections import deque
#import eli5
#from eli5.sklearn import PermutationImportance
import shutil
import array
#import sqlite3
#from tsfresh.utilities.dataframe_functions import roll_time_series
#from tsfresh import extract_features
SEED_NUMBER=2020
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
set_seed(SEED_NUMBER)
pd.set_option('display.max_columns', 5000)
pd.set_option('display.max_rows', 1000)
EMPTY_NUM=-999
# https://github.com/lopuhin/kaggle-imet-2019/blob/master/imet/utils.py#L17
ON_KAGGLE = False#'KAGGLE_URL_BASE'in os.environ
#print(" os.environ :", os.environ)
print("ON_KAGGLE:", ON_KAGGLE)
if not ON_KAGGLE:
#import slackweb
try:
import wandb
from wandb.lightgbm import wandb_callback
except:
print(f"error : cannot import wandb")
else:
import warnings
warnings.simplefilter('ignore')
PROJECT_NAME = "probspace_kiva"
INPUT_DIR = Path("../data/raw")
PROC_DIR = Path("../data/proc")
LOG_DIR = Path("../data/log")
OUTPUT_DIR = Path("../data/submission")
PATH_TO_GRAPH_DIR=Path("../data/graph")
PATH_TO_MODEL_DIR=Path("../data/model")
PATH_TO_UPLOAD_MODEL_PARENT_DIR=Path("../data/model")
PATH_TO_FEATURES_DIR=Path("../data/features")
class Colors:
"""Defining Color Codes to color the text displayed on terminal.
"""
blue = "\033[94m"
green = "\033[92m"
yellow = "\033[93m"
red = "\033[91m"
end = "\033[0m"
def color(string: str, color: Colors = Colors.yellow) -> str:
return f"{color}{string}{Colors.end}"
@contextmanager
def timer2(label: str) -> None:
"""compute the time the code block takes to run.
"""
p = psutil.Process(os.getpid())
start = time.time() # Setup - __enter__
m0 = p.memory_info()[0] / 2. ** 30
print(color(f"{label}: Start at {start}; RAM USAGE AT START {m0}"))
try:
yield # yield to body of `with` statement
finally: # Teardown - __exit__
m1 = p.memory_info()[0] / 2. ** 30
delta = m1 - m0
sign = '+' if delta >= 0 else '-'
delta = math.fabs(delta)
end = time.time()
print(color(f"{label}: End at {end} ({end - start}[s] elapsed); RAM USAGE AT END {m1:.2f}GB ({sign}{delta:.2f}GB)", color=Colors.red))
@contextmanager
def trace(title):
t0 = time.time()
p = psutil.Process(os.getpid())
m0 = p.memory_info()[0] / 2. ** 30
yield
m1 = p.memory_info()[0] / 2. ** 30
delta = m1 - m0
sign = '+' if delta >= 0 else '-'
delta = math.fabs(delta)
print(f"[{m1:.1f}GB({sign}{delta:.1f}GB):{time.time() - t0:.1f}sec] {title} ", file=sys.stderr)
def cpu_dict(my_dictionary, text=None):
size = sys.getsizeof(json.dumps(my_dictionary))
#size += sum(map(sys.getsizeof, my_dictionary.values())) + sum(map(sys.getsizeof, my_dictionary.keys()))
print(f"{text} size : {size}")
def cpu_stats(text=None):
#if not ON_KAGGLE:
pid = os.getpid()
py = psutil.Process(pid)
memory_use = py.memory_info()[0] / 2. ** 30
print('{} memory GB:'.format(text) + str(memory_use))#str(np.round(memory_use, 2)))
def reduce_mem_Series(se, verbose=True, categories=False):
numeric2reduce = ["int16", "int32", "int64", "float64"]
col_type = se.dtype
best_type = None
if (categories==True) & (col_type == "object"):
se = se.astype("category")
best_type = "category"
elif col_type in numeric2reduce:
downcast = "integer" if "int" in str(col_type) else "float"
se = pd.to_numeric(se, downcast=downcast)
best_type = se.dtype.name
if verbose and best_type is not None and best_type != str(col_type):
print(f"Series '{se.index}' converted from {col_type} to {best_type}")
return se
def reduce_mem_usage(df, verbose=True, categories=False):
# All types that we want to change for "lighter" ones.
# int8 and float16 are not include because we cannot reduce
# those data types.
# float32 is not include because float16 has too low precision.
numeric2reduce = ["int16", "int32", "int64", "float64"]
start_mem = 0
if verbose:
start_mem = df.memory_usage().sum() / 1024**2
#start_mem = memory_usage_mb(df, deep=deep)
for col, col_type in df.dtypes.iteritems():
best_type = None
if (categories==True) & (col_type == "object"):
df[col] = df[col].astype("category")
best_type = "category"
elif col_type in numeric2reduce:
downcast = "integer" if "int" in str(col_type) else "float"
df[col] = pd.to_numeric(df[col], downcast=downcast)
best_type = df[col].dtype.name
# Log the conversion performed.
if verbose and best_type is not None and best_type != str(col_type):
print(f"Column '{col}' converted from {col_type} to {best_type}")
if verbose:
#end_mem = memory_usage_mb(df, deep=deep)
end_mem = df.memory_usage().sum() / 1024**2
diff_mem = start_mem - end_mem
percent_mem = 100 * diff_mem / start_mem
print(f"Memory usage decreased from"
f" {start_mem:.2f}MB to {end_mem:.2f}MB"
f" ({diff_mem:.2f}MB, {percent_mem:.2f}% reduction)")
return df
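# Minimal sketch of reduce_mem_usage on a toy frame (hypothetical data), kept as a comment:
# int64/float64 columns are downcast and, with categories=True, object columns become category.
#df_demo = pd.DataFrame({"a": np.arange(1000), "b": np.random.rand(1000), "c": ["x", "y"] * 500})
#df_demo = reduce_mem_usage(df_demo, verbose=True, categories=True)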
@contextmanager
def timer(name):
t0 = time.time()
yield
print(f'[{name}] done in {time.time() - t0:.6f} s')
def normal_sampling(mean, label_k, std=2, under_limit=1e-15):
val = math.exp(-(label_k-mean)**2/(2*std**2))/(math.sqrt(2*math.pi)*std)
if val < under_limit:
val = under_limit
return val
def compHist(np_oof, np_y_pred, np_y_true, title_str):
np_list = [np_oof, np_y_true, np_y_pred]
label_list = ["oof", "true", "pred"]
color_list = ['red', 'blue', 'green']
for np_data, label, color in zip(np_list, label_list, color_list):
sns.distplot(
np_data,
#bins=sturges(len(data)),
color=color,
kde=True,
label=label
)
plt.savefig(str(PATH_TO_GRAPH_DIR / f"{title_str}_compHist.png"))
plt.close()
def compPredTarget(y_pred, y_true, index_list, title_str, lm_flag=False):
df_total = pd.DataFrame({"Prediction" : y_pred.flatten(),
"Target" : y_true.flatten(),
"Difference" : y_true.flatten() -y_pred.flatten()
#"type" : np.full(len(y_pred), "oof")
}, index=index_list)
print(df_total)
print("Difference > 0.1 : ", df_total[np.abs(df_total["Difference"]) > 0.1].Difference.count())
#print(df_total[df_total["type"]=="valid_train"].Difference)
fig = plt.figure()
sns.displot(df_total.Difference,bins=10)
plt.savefig(str(PATH_TO_GRAPH_DIR / f"{title_str}_oof_diff_distplot.png"))
plt.close()
#pdb.set_trace()
if lm_flag:
plt.figure()
fig2 = sns.lmplot(x="Target", y="Prediction", data=df_total, palette="Set1")
#fig.set_axis_labels('target', 'pred')
plt.title(title_str)
plt.tight_layout()
plt.savefig(str(PATH_TO_GRAPH_DIR / f"{title_str}_oof_true_lm.png"))
plt.close()
def dimensionReductionPCA(df, _n_components, prefix="PCA_"):
pca = PCA(n_components=_n_components)
pca.fit(df)
reduced_feature = pca.transform(df)
df_reduced = pd.DataFrame(reduced_feature, columns=[f"{prefix}{x + 1}" for x in range(_n_components)], index=df.index)
print(f"df_reduced:{df_reduced}")
df_tmp = pd.DataFrame(pca.explained_variance_ratio_, index=[f"{prefix}{x + 1}" for x in range(_n_components)])
print(df_tmp)
import matplotlib.ticker as ticker
plt.gca().get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
plt.plot([0] + list( np.cumsum(pca.explained_variance_ratio_)), "-o")
plt.xlabel("Number of principal components")
plt.ylabel("Cumulative contribution rate")
plt.grid()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_PCA.png")
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
# df_comp = pd.DataFrame(pca.components_, columns=df.columns, index=[f"{prefix}{x + 1}" for x in range(_n_components)])
# print(df_comp)
# plt.figure(figsize=(6, 6))
# for x, y, name in zip(pca.components_[0], pca.components_[1], df.columns):
# plt.text(x, y, name)
# plt.scatter(pca.components_[0], pca.components_[1], alpha=0.8)
# plt.grid()
# plt.xlabel("PC1")
# plt.ylabel("PC2")
# path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_PCA_scatter.png")
# #print("save: ", path_to_save)
# plt.savefig(path_to_save)
# plt.show(block=False)
# plt.close()
return df_reduced
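# Sketch of dimensionReductionPCA; df_numeric is a placeholder for an all-numeric,
# NaN-free feature frame built elsewhere in the pipeline.
#df_pca = dimensionReductionPCA(df_numeric, _n_components=3, prefix="PCA_")
#df_features = pd.concat([df_numeric, df_pca], axis=1)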
def addNanPos(df, cols_list:list, suffix="nan_pos"):
for col in cols_list:
if df[col].isnull().any():
df["{}_{}".format(col, suffix)] = df[col].map(lambda x: 1 if pd.isna(x) else 0)
return df
def get_feature_importances(X, y, shuffle=False):
# Shuffle the target variable if requested
if shuffle:
y = np.random.permutation(y)
# Train the model
clf = RandomForestClassifier(random_state=42)
clf.fit(X, y)
# Build a dataframe containing the feature importances
imp_df = pd.DataFrame()
imp_df["feature"] = X.columns
imp_df["importance"] = clf.feature_importances_
return imp_df.sort_values("importance", ascending=False)
def nullImporcance(df_train_X, df_train_y, th=80, n_runs=100):
# Train on the actual target and build the feature-importance dataframe
actual_imp_df = get_feature_importances(df_train_X, df_train_y, shuffle=False)
# Train with the target shuffled and collect the null feature importances
N_RUNS = n_runs
null_imp_df = pd.DataFrame()
for i in range(N_RUNS):
print("run : {}".format(i))
imp_df = get_feature_importances(df_train_X, df_train_y, shuffle=True)
imp_df["run"] = i + 1
null_imp_df = pd.concat([null_imp_df, imp_df])
def display_distributions(actual_imp_df, null_imp_df, feature, path_to_save_dir):
# Get the importances for the given feature
actual_imp = actual_imp_df.query("feature == '{}'".format(feature))["importance"].mean()
null_imp = null_imp_df.query("feature == '{}'".format(feature))["importance"]
# Visualize
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
a = ax.hist(null_imp, label="Null importances")
ax.vlines(x=actual_imp, ymin=0, ymax=np.max(a[0]), color='r', linewidth=10, label='Real Target')
ax.legend(loc="upper right")
ax.set_title("Importance of {}".format(feature), fontweight='bold')
plt.xlabel("Null Importance Distribution for {}".format(feature))
plt.ylabel("Importance")
plt.show()
path_to_save = os.path.join(str(path_to_save_dir), "null_imp_{}".format(feature))
plt.savefig(path_to_save)
# Plot the actual vs. null importance distribution for each feature
for feature in actual_imp_df["feature"]:
display_distributions(actual_imp_df, null_imp_df, feature, PATH_TO_GRAPH_DIR)
# Set the threshold
THRESHOLD = th
# Collect the features that fall inside the threshold band (candidate null features)
null_features = []
for feature in actual_imp_df["feature"]:
print("Null :: {}".format(feature))
actual_value = actual_imp_df.query("feature=='{}'".format(feature))["importance"].values
null_value = null_imp_df.query("feature=='{}'".format(feature))["importance"].values
percentage = (null_value < actual_value).sum() / null_value.size * 100
print("actual_value: {}, null_value : {}, percentage : {}".format(actual_value, null_value, percentage))
if percentage < THRESHOLD and (100-THRESHOLD) < percentage:
null_features.append(feature)
return null_features
def makeFourArithmeticOperations(df, col1, col2):
new_col = "auto__{}_add_{}".format(col1, col2)
df[new_col] = df[col1] + df[col2]
new_col = "auto__{}_diff_{}".format(col1, col2)
df[new_col] = df[col1] - df[col2]
new_col = "auto__{}_multiply_{}".format(col1, col2)
df[new_col] = df[col1] * df[col2]
new_col = "auto__{}_devide_{}".format(col1, col2)
df[new_col] = df[col1] / df[col2]
return df
def procAgg(df:pd.DataFrame, base_group_col:str, agg_col:str, agg_list:list):
for agg_func in agg_list:
new_col = "auto__{}_{}_agg_by_{}".format(agg_col, agg_func, base_group_col)
map_dict = df.groupby(base_group_col)[agg_col].agg(agg_func)
print(new_col)
print(map_dict)
df[new_col] = df[base_group_col].map(map_dict)
df[new_col] = reduce_mem_Series(df[new_col])
#df = makeFourArithmeticOperations(df, new_col, agg_col)
return df
def aggregationFE(df:pd.DataFrame, base_group_cols:list, agg_cols:list, agg_func_list:list=['count', 'max', 'min', 'sum', 'mean', "nunique", "std", "median", "skew"]):
for b in base_group_cols:
for a in agg_cols:
df = procAgg(df, b, a, agg_func_list)
return df
def makeInteractionColumn(df:pd.DataFrame, inter_cols:list):
print(inter_cols)
for c in inter_cols:
col_name = "inter_" + "_".join(c)
print(col_name)
#df[col_name] = "_"
for i, col in enumerate(c):
print(col)
if i == 0:
df[col_name] = df[col]
else:
#
#print(df[col])
df[col_name] = df[col_name].map(lambda x : str(x)) + "_" + df[col].map(lambda x : str(x))
#print(df[col_name].unique())
print("****")
return df
def interactionFE(df:pd.DataFrame, cols:list=[], inter_nums:list=[]):
for inter_num in inter_nums:
inter_cols = itertools.combinations(cols, inter_num)
df = makeInteractionColumn(df, inter_cols)
# for c in itertools.combinations(cols, inter_num):
#
# col_name = "inter_" + "_".join(c)
# print(col_name)
# #df[col_name] = "_"
#
# for i, col in enumerate(c):
# print(col)
# if i == 0:
# df[col_name] = df[col]
# else:
# #
# #print(df[col])
# df[col_name] = df[col_name].map(lambda x : str(x)) + "_" + df[col].map(lambda x : str(x))
#
# print(df[col_name].unique())
return df
def interactionFEbyOne(df:pd.DataFrame, inter_col:str, target_cols:list, inter_nums:list=[1]):
for inter_num in inter_nums:
comb = itertools.combinations(target_cols, inter_num)
for c in comb:
if not inter_col in c:
inter_cols = (inter_col,) + c
print(inter_cols)
df = makeInteractionColumn(df, [inter_cols])
return df
def calcSmoothingParam(num_of_data, k=100, f=100):
param = 1 / (1 + np.exp(-(num_of_data - k)/f))
return param
def calcSmoothingTargetMean(df:pd.DataFrame, col_name, target_name):
#print(df[target_name])
all_mean = df[target_name].mean()
#print(all_mean)
#sys.exit()
df_vc = df[col_name].value_counts()
gp_mean_dict = df.groupby(col_name)[target_name].mean()
smooth_target_mean = df_vc.copy()
for key, val in gp_mean_dict.items():
n=df_vc[key]
param = calcSmoothingParam(num_of_data=n)
smooth = param * val + (1-param)*all_mean
smooth_target_mean[key] = smooth
print("label : {}, n = {}, val={}, all = {}, param = {}, final={}".format(key, n, val, all_mean, param, smooth))
del df_vc
gc.collect()
return smooth_target_mean
def targetEncoding(df_train_X, df_train_y, encoding_cols:list, _n_splits=4, smooth_flag=0):
dict_all_train_target_mean = {}
for c in encoding_cols:
# print("Target Encoding : {}".format(c))
# print(f"df_train_X[c] : {df_train_X[c].shape}")
# print(f"df_train_y : {df_train_y.shape}")
#df_data_tmp = pd.DataFrame({c: df_train_X[c], "target":df_train_y})
df_data_tmp = pd.DataFrame(df_train_X[c])
df_data_tmp["target"] = df_train_y#.loc[:,0]
#nan_mean= -999#df_data_tmp["target"].mean()
nan_mean=df_data_tmp["target"].mean()
if smooth_flag:
all_train_target_mean=calcSmoothingTargetMean(df_data_tmp, c, "target")
else:
all_train_target_mean = df_data_tmp.groupby(c)["target"].mean()
dict_all_train_target_mean[c] = all_train_target_mean
#print(all_train_target_mean)
#df_test_X[c] = df_test_X[c].map(all_train_target_mean)
tmp = np.repeat(np.nan, df_train_X.shape[0])
kf = KFold(n_splits=_n_splits, shuffle=True, random_state=0)
for idx_1, idx_2 in kf.split(df_train_X):
if smooth_flag:
target_mean=calcSmoothingTargetMean(df_data_tmp.iloc[idx_1], c, "target")
else:
target_mean = df_data_tmp.iloc[idx_1].groupby(c)["target"].mean()
tmp[idx_2] = df_train_X[c].iloc[idx_2].map(target_mean)
idx_1_unique = df_data_tmp.iloc[idx_1][c].unique()
idx_2_unique = df_data_tmp.iloc[idx_2][c].unique()
for c2 in idx_2_unique:
if not c2 in idx_1_unique:
pass
#print("TARGET ENCORDING ERROR {}: {} replace to {}".format(c, c2, nan_mean))
df_train_X[c] = tmp
df_train_X[c].fillna(value=nan_mean, inplace=True)
#print(df_train_X.loc[df_train_X[c].isnull(), c])
#showNAN(df_train_X)
return df_train_X, dict_all_train_target_mean
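# Sketch of the out-of-fold target encoding above. Column names are placeholders; the
# returned mapping dict is what should be applied to the test frame.
#df_train_X, te_maps = targetEncoding(df_train_X, df_train_y, encoding_cols=["sector"], _n_splits=4, smooth_flag=1)
#df_test_X["sector"] = df_test_X["sector"].map(te_maps["sector"])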
def add_noise(series, noise_level):
return series * (1 + noise_level * np.random.randn(len(series)))
def calc_smoothing(se_gp_count, se_gp_mean, prior, min_samples_leaf=1, smoothing=1):
smoothing = 1 / (1 + np.exp(-(se_gp_count - min_samples_leaf) / smoothing))
se_smoothing_mean = prior * (1 - smoothing) + se_gp_mean * smoothing
return se_smoothing_mean #, smoothing
def TEST__calc_smoothing():
se_count = pd.Series(np.arange(2000000))
cpu_stats("before calc_smoothing")
se_count, smoothing = calc_smoothing(se_count, se_count, prior=50, min_samples_leaf=100, smoothing=300)
cpu_stats("after calc_smoothing")
#fig = plt.Figure()
plt.plot(se_count, smoothing, label="smoothing")
plt.show()
def target_encode_with_smoothing(trn_series=None,
#tst_series=None,
target_se=None,
min_samples_leaf=1,
smoothing=1,
#noise_level=0,
agg_val="mean",
):
"""
from https://www.kaggle.com/ogrellier/python-target-encoding-for-categorical-features/notebook
Smoothing is computed like in the following paper by <NAME>
https://kaggle2.blob.core.windows.net/forum-message-attachments/225952/7441/high%20cardinality%20categoricals.pdf
trn_series : training categorical feature as a pd.Series
tst_series : test categorical feature as a pd.Series
target : target data as a pd.Series
min_samples_leaf (int) : minimum samples to take category average into account
smoothing (int) : smoothing effect to balance categorical average vs prior
"""
assert len(trn_series) == len(target_se)
#assert trn_series.name == tst_series.name
temp = pd.concat([trn_series, target_se], axis=1)
# Compute target mean
averages = temp.groupby(by=trn_series.name)[target_se.name].agg([agg_val, "count"])
# Compute smoothing
smoothing = 1 / (1 + np.exp(-(averages["count"] - min_samples_leaf) / smoothing))
# Apply average function to all target data
if agg_val == "mean":
prior = target_se.mean()
elif agg_val == "std":
prior = target_se.std()
# The bigger the count the less full_avg is taken into account
averages[target_se.name] = prior * (1 - smoothing) + averages[agg_val] * smoothing
averages.drop([agg_val, "count"], axis=1, inplace=True)
return averages
# # Apply averages to trn and tst series
# ft_trn_series = pd.merge(
# trn_series.to_frame(trn_series.name),
# averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),
# on=trn_series.name,
# how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)
# # pd.merge does not keep the index so restore it
# ft_trn_series.index = trn_series.index
# ft_tst_series = pd.merge(
# tst_series.to_frame(tst_series.name),
# averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),
# on=tst_series.name,
# how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)
# pd.merge does not keep the index so restore it
#ft_tst_series.index = tst_series.index
#return add_noise(ft_trn_series, noise_level), add_noise(ft_tst_series, noise_level)
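# Sketch of target_encode_with_smoothing for a single column ("sector"/"target" are placeholder
# names): the returned frame is indexed by category, so map the smoothed means back on.
#averages = target_encode_with_smoothing(trn_series=df_train["sector"], target_se=df_train["target"],
#                                        min_samples_leaf=100, smoothing=10, agg_val="mean")
#df_train["sector_te_mean"] = df_train["sector"].map(averages["target"])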
def showNAN(df):
# print(df.isnull())
# df.isnull().to_csv(PROC_DIR/'isnull.csv')
# print(df.isnull().sum())
# df.isnull().sum().to_csv(PROC_DIR/'isnull_sum.csv')
# total = df.isnull().sum().sort_values(ascending=False)
# print(total)
# print(f"count : {df.isnull().count()}")
# percent = (df.isnull().sum()/df.isnull().count()).sort_values(ascending=False)
# missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
#df=df.replace([np.inf, -np.inf], np.nan)
nan_dict = {}
for col in df.columns:
total = df[col].isnull().sum()
percent = total / df[col].isnull().count()
nan_dict[col] = [total, percent]
missing_data = pd.DataFrame(nan_dict, index=['Total', 'Percent']).T
missing_data = missing_data.sort_values('Percent', ascending=False)
nan_data = missing_data.loc[missing_data["Percent"] > 0, :]
if not ON_KAGGLE:
print("****show nan*****")
print(nan_data)
print("****show nan end*****\n")
nan_list = list(nan_data.index)
#del missing_data, nan_data
#gc.collect()
return nan_list
def accumAdd(accum_dict, dict_key_name, add_val, _empty_val=EMPTY_NUM):
if accum_dict[dict_key_name] == _empty_val:
accum_dict[dict_key_name] = add_val
else:
accum_dict[dict_key_name] += add_val
return accum_dict
def getColumnsFromParts(colums_parts, base_columns):
new_cols=[]
for col_p in colums_parts:
for col in base_columns:
if col_p in col:
if not col in new_cols:
new_cols.append(col)
#print("find from the part : {}".format(col))
return new_cols.copy()
def checkCorreatedFeatures(df, exclude_columns=[], th=0.995, use_cols=[]):
counter = 0
to_remove = []
if len(use_cols)==0:
use_cols = df.columns
for feat_a in use_cols:
if feat_a in exclude_columns:
continue
for feat_b in df.columns:
if feat_b in exclude_columns:
continue
if feat_a != feat_b and feat_a not in to_remove and feat_b not in to_remove:
#print('{}: FEAT_A: {}, FEAT_B: {}'.format(counter, feat_a, feat_b))
c = np.corrcoef(df[feat_a], df[feat_b])[0][1]
if c > th:
counter += 1
to_remove.append(feat_b)
print('{}: FEAT_A: {}, FEAT_B (removed): {} - Correlation: {}'.format(counter, feat_a, feat_b, c))
return to_remove.copy()
def addStrToLastWithoutContinuous(chain_string, added_str, splitter="_"):
string_list = chain_string.split(splitter)
if string_list[-1] != added_str:
return chain_string + splitter + added_str
else:
return chain_string
def adv2(_df_train, _df_test, drop_cols):
df_train = _df_train.copy()
df_test = _df_test.copy()
print(len(df_train))
print(len(df_test))
df_train["isTest"] = 0
df_test["isTest"] = 1
drop_cols.append("isTest")
df = pd.concat([df_train, df_test])
#train 0, test 1
df_X = df.drop(columns= drop_cols)
df_y = df["isTest"]
columns=df_X.columns.to_list()
train, test, y_train, y_test = train_test_split(df_X, df_y, test_size=0.33, random_state=42, shuffle=True)
del df, df_y, df_X
gc.collect()
train = lgb.Dataset(train, label=y_train)
test = lgb.Dataset(test, label=y_test)
param = {'num_leaves': 50,
'min_data_in_leaf': 30,
'objective':'binary',
'max_depth': 5,
'learning_rate': 0.05,
"min_child_samples": 20,
"boosting": "gbdt",
"feature_fraction": 0.9,
"bagging_freq": 1,
"bagging_fraction": 0.9 ,
"bagging_seed": 44,
"metric": 'auc',
"verbosity": -1,
'importance_type':'gain',
}
num_round = 1000
clf = lgb.train(param, train, num_round, valid_sets = [train, test], verbose_eval=50, early_stopping_rounds = 500)
feature_imp = pd.DataFrame(sorted(zip(clf.feature_importance(),columns)), columns=['Value','Feature'])
plt.figure(figsize=(20, 20))
sns.barplot(x="Value", y="Feature", data=feature_imp.sort_values(by="Value", ascending=False).head(100))
plt.title('LightGBM Features')
plt.tight_layout()
plt.show()
plt.savefig(str(PATH_TO_GRAPH_DIR / 'lgbm_importances-01.png'))
def adversarialValidation(_df_train, _df_test, drop_cols, sample_flag=False):
df_train = _df_train.copy()
df_test = _df_test.copy()
if sample_flag:
num_test = len(df_test)
df_train = df_train.sample(n=num_test)
print(f"df_train : {len(df_train)}")
print(f"df_test : {len(df_test)}")
df_train["isTest"] = 0
df_test["isTest"] = 1
drop_cols.append("isTest")
df = pd.concat([df_train, df_test])
#train 0, test 1
df_X = df.drop(columns= drop_cols)
df_y = df["isTest"]
adv_params = {
'learning_rate': 0.05,
'n_jobs': -1,
'seed': 50,
'objective':'binary',
'boosting_type':'gbdt',
'is_unbalance': False,
'importance_type':'gain',
'metric': 'auc',
'verbose': 1,
}
model = lgb.LGBMClassifier(n_estimators=100)
model.set_params(**adv_params)
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
score = cross_validate(model, df_X, df_y, cv=skf, return_estimator=True, scoring="roc_auc")
adv_acc = score['test_score'].mean()
print('Adv AUC:', score['test_score'].mean())
feature_imp = pd.DataFrame(sorted(zip(score['estimator'][0].feature_importances_,df_X.columns), reverse=True), columns=['Value','Feature'])
print(feature_imp)
#graphImportance(feature_imp, 50)
#base_name = '../data/features/adv_feature_imp_' + datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
#feature_imp.to_csv(base_name + '.csv')
# f = open(base_name + '.pkl', 'wb')
# pickle.dump(feature_imp, f)
# f.close()
for i in range(len(feature_imp["Feature"])):
if feature_imp["Value"].values[i] > 0:
str_col = "\'" + feature_imp["Feature"].values[i] + "\',"
print(str_col)
return adv_acc, feature_imp
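# Sketch of adversarialValidation (placeholder frames/columns): an AUC close to 0.5 suggests
# train and test are hard to tell apart, while a high AUC flags distribution shift.
#adv_auc, adv_imp = adversarialValidation(df_train, df_test, drop_cols=["target"], sample_flag=True)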
def get_too_many_null_attr(data, rate=0.9):
# many_null_cols = []
# for col in data.columns:
# print(col)
# if data[col].isnull().sum() / data.shape[0] > 0.9:
# many_null_cols.append(col)
# print("DONE!!!!!")
many_null_cols = [col for col in data.columns if data[col].isnull().sum() / data.shape[0] > rate]
return many_null_cols
def get_too_many_repeated_val(data, rate=0.95):
big_top_value_cols = [col for col in data.columns if data[col].value_counts(dropna=False, normalize=True).values[0] > rate]
return big_top_value_cols
def get_useless_columns(data, null_rate=0.95, repeat_rate=0.95):
too_many_null = get_too_many_null_attr(data, null_rate)
print("More than {}% null: ".format(null_rate) + str(len(too_many_null)))
print(too_many_null)
too_many_repeated = get_too_many_repeated_val(data, repeat_rate)
print("More than {}% repeated value: ".format(repeat_rate) + str(len(too_many_repeated)))
print(too_many_repeated)
cols_to_drop = list(set(too_many_null + too_many_repeated))
return cols_to_drop
def get_useless_columnsTrainTest(df_train, df_test, null_rate=0.95, repeat_rate=0.95):
drop_train = set(get_useless_columns(df_train, null_rate=null_rate, repeat_rate=repeat_rate))
drop_test = set(get_useless_columns(df_test, null_rate=null_rate, repeat_rate=repeat_rate)) if not df_test.empty else set([])
s_symmetric_difference = drop_train ^ drop_test
if s_symmetric_difference:
print("{} are not included in each set".format(s_symmetric_difference))
cols_to_drop = list((drop_train) & (drop_test))
print("intersection cols_to_drop")
print(cols_to_drop)
return cols_to_drop
def transformCosCircle(df, time_col_str):
val = [float(x) for x in df[time_col_str].unique()]
val.sort()
#print(val)
num = len(val)
unit = 180.0 / num
#print(unit)
trans_val = [x * unit for x in val]
#print(trans_val)
df[time_col_str + "_angle_rad"] = np.deg2rad(df[time_col_str].replace(val, trans_val))
df[time_col_str + "_cos"] = np.cos(df[time_col_str + "_angle_rad"])
df[time_col_str + "_sin"] = np.sin(df[time_col_str + "_angle_rad"])
df = df.drop(columns=[time_col_str + "_angle_rad"])
#print(df[time_col_str])
return df
def extract_time_features(df, date_col):
df[date_col] = pd.to_datetime(df[date_col])
df['month'] = df[date_col].dt.month
df['day'] = df[date_col].dt.day
#df['hour'] = df[date_col].dt.hour
df['year'] = df[date_col].dt.year
#df["seconds"] = df[date_col].dt.second
df['dayofweek'] = df[date_col].dt.dayofweek #0:monday to 6: sunday
#df['week'] = df[date_col].dt.week # the week ordinal of the year
df['weekofyear'] = df[date_col].dt.weekofyear # the week ordinal of the year
df['dayofyear'] = df[date_col].dt.dayofyear #1-366
df['quarter'] = df[date_col].dt.quarter
df['is_month_start'] = df[date_col].dt.is_month_start
df['is_month_end'] = df[date_col].dt.is_month_end
df['is_quarter_start'] = df[date_col].dt.is_quarter_start
df['is_quarter_end'] = df[date_col].dt.is_quarter_end
df['is_year_start'] = df[date_col].dt.is_year_start
df['is_year_end'] = df[date_col].dt.is_year_end
df['is_leap_year'] = df[date_col].dt.is_leap_year
df['days_in_month'] = df[date_col].dt.daysinmonth
df["days_from_end_of_month"] = df['days_in_month'] - df["day"]
df["days_rate_in_month"] = (df["day"] -1) / (df['days_in_month'] - 1)
df["s_m_e_in_month"] = df["day"].map(lambda x: 0 if x <= 10 else (1 if x <= 20 else 2))
# df = transformCosCircle(df, "day")
# df = transformCosCircle(df, "month")
# df = transformCosCircle(df, "dayofweek")
# df = transformCosCircle(df, "weekofyear")
# df = transformCosCircle(df, "dayofyear")
return df
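# Sketch of extract_time_features on a toy frame (hypothetical data), kept as a comment:
#df_demo = pd.DataFrame({"date": pd.date_range("2020-01-01", periods=10, freq="D")})
#df_demo = extract_time_features(df_demo, "date")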
def pickle_dump(obj, path):
with open(path, mode='wb') as f:
pickle.dump(obj,f)
def pickle_load(path):
data = None
with open(path, mode='rb') as f:
data = pickle.load(f)
return data
def procLabelEncToColumns(df_train, df_test, col_list):
df_train["train_test_judge"] = "train"
df_test["train_test_judge"] = "test"
df = pd.concat([df_train, df_test])
for c in col_list:
df[c] = procLabelEncToSeries(df[c])
df_train = df.loc[df["train_test_judge"]=="train"].drop(columns=["train_test_judge"])
df_test = df.loc[df["train_test_judge"]=="test"].drop(columns=["train_test_judge"])
return df_train, df_test
def procLabelEncToSeries(se):
val_list = list(se.dropna().unique())
val_list.sort()
#print(df[f].unique())
replace_map = dict(zip(val_list, np.arange(len(val_list))))
se = se.map(replace_map)
#pdb.set_trace()
return se
def proclabelEncodings(df, not_proc_list=[]):
#lbl = preprocessing.LabelEncoder()
if not ON_KAGGLE:
print("**label encoding**")
decode_dict = {}
for f in df.columns:
if df[f].dtype.name =='object':
if f in not_proc_list:
continue
if not ON_KAGGLE:
print(f)
val_list = list(df[f].dropna().unique())
val_list.sort()
#print(df[f].unique())
replace_map = dict(zip(val_list, np.arange(len(val_list))))
df[f] = df[f].map(replace_map)
#print(df[f].unique())
inverse_dict = get_swap_dict(replace_map)
decode_dict[f] = inverse_dict
#lbl.fit(list(df[f].dropna().unique()))
#print(list(lbl.classes_))
#df[f] = lbl.transform(list(df[f].values))
if not ON_KAGGLE:
print("**label encoding end **\n")
print("**for dicode**")
#print(f"{decode_dict}")
return df, decode_dict
def qwk(act,pred,n=4,hist_range=(0,3), weights=None):
O = confusion_matrix(act,pred, labels=[0, 1, 2, 3],sample_weight = weights)
O = np.divide(O,np.sum(O))
W = np.zeros((n,n))
for i in range(n):
for j in range(n):
W[i][j] = ((i-j)**2)/((n-1)**2)
act_hist = np.histogram(act,bins=n,range=hist_range, weights=weights)[0]
prd_hist = np.histogram(pred,bins=n,range=hist_range, weights=weights)[0]
E = np.outer(act_hist,prd_hist)
E = np.divide(E,np.sum(E))
num = np.sum(np.multiply(W,O))
den = np.sum(np.multiply(W,E))
return 1-np.divide(num,den)
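# Tiny sanity check for qwk (labels are assumed to be in {0, 1, 2, 3}, as hard-coded above):
# perfect agreement yields a kappa of 1.0.
#print(qwk(np.array([0, 1, 2, 3]), np.array([0, 1, 2, 3])))  # -> 1.0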
def calcClass(X, coef):
X_p = np.copy(X)
for i, pred in enumerate(X_p):
if pred < coef[0]:
X_p[i] = 0
elif pred >= coef[0] and pred < coef[1]:
X_p[i] = 1
elif pred >= coef[1] and pred < coef[2]:
X_p[i] = 2
else:
X_p[i] = 3
return X_p
class OptimizedRounder(object):
"""
An optimizer for rounding thresholds
to maximize Quadratic Weighted Kappa (QWK) score
"""
def __init__(self):
self.coef_ = 0
def _kappa_loss(self, coef, X, y):
#print(coef)
"""
Get loss according to
using current coefficients
:param coef: A list of coefficients that will be used for rounding
:param X: The raw predictions
:param y: The ground truth labels
"""
X_p = np.copy(X)
for i, pred in enumerate(X_p):
if pred < coef[0]:
X_p[i] = 0
elif pred >= coef[0] and pred < coef[1]:
X_p[i] = 1
elif pred >= coef[1] and pred < coef[2]:
X_p[i] = 2
#elif pred >= coef[2] and pred < coef[3]:
# X_p[i] = 3
else:
X_p[i] = 3
#ll = cohen_kappa_score(y, X_p, weights='quadratic')
ll = qwk(y, X_p)
#print(ll)
return -ll
def fit(self, X, y, initial_coef):
"""
Optimize rounding thresholds
:param X: The raw predictions
:param y: The ground truth labels
"""
loss_partial = partial(self._kappa_loss, X=X, y=y)
#initial_coef = th_list
self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')
return self.coefficients()
def predict(self, X, coef):
"""
Make predictions with specified thresholds
:param X: The raw predictions
:param coef: A list of coefficients that will be used for rounding
"""
X_p = np.copy(X)
for i, pred in enumerate(X_p):
if pred < coef[0]:
X_p[i] = 0
elif pred >= coef[0] and pred < coef[1]:
X_p[i] = 1
elif pred >= coef[1] and pred < coef[2]:
X_p[i] = 2
#elif pred >= coef[2] and pred < coef[3]:
# X_p[i] = 3
else:
X_p[i] = 3
return X_p
def coefficients(self):
"""
Return the optimized coefficients
"""
return self.coef_['x']
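# Sketch of OptimizedRounder on out-of-fold regression predictions (oof_pred / y_train /
# test_pred are placeholders produced by the CV loop elsewhere):
#optR = OptimizedRounder()
#coef = optR.fit(oof_pred, y_train, initial_coef=[0.5, 1.5, 2.5])
#test_classes = optR.predict(test_pred, coef)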
def calcDropColsFromPermuationImportance(path_to_dir):
ppath_to_dir = Path(path_to_dir)
df_total = pd.DataFrame()
for i, f in enumerate(ppath_to_dir.glob("permutation_feature_imp_*.csv")):
df_imp = pd.read_csv(f, index_col=1).rename(columns={'weight': 'weight{}'.format(i)})
if i == 0:
df_total = df_imp['weight{}'.format(i)]
else:
df_total = pd.concat([df_total, df_imp['weight{}'.format(i)]], axis=1)
df_total["mean"] = df_total.mean(axis=1)
df_total.to_csv(ppath_to_dir/("total_" + datetime.now().strftime("%Y%m%d_%H%M%S") + ".csv"))
drop_list = list(df_total.loc[df_total["mean"] <= 0].index.values)
for col in drop_list:
print('"{}",'.format(col))
return drop_list
def stract_hists(feature, train, test, adjust=False, plot=False):
n_bins = 10
train_data = train[feature]
test_data = test[feature]
if adjust:
test_data *= train_data.mean() / test_data.mean()
perc_90 = np.percentile(train_data, 95)
train_data = np.clip(train_data, 0, perc_90)
test_data = np.clip(test_data, 0, perc_90)
train_hist = np.histogram(train_data, bins=n_bins)[0] / len(train_data)
test_hist = np.histogram(test_data, bins=n_bins)[0] / len(test_data)
msre = mean_squared_error(train_hist, test_hist)
#print(msre)
if plot:
print(msre)
plt.bar(range(n_bins), train_hist, color='blue', alpha=0.5)
plt.bar(range(n_bins), test_hist, color='red', alpha=0.5)
plt.show()
return msre
def get_swap_dict(d):
return {v: k for k, v in d.items()}
def testComp():
y_pred = np.arange(5).flatten()
y_true = np.arange(5).flatten()* 2
compPredTarget(y_pred, y_true, np.arange(5).flatten(), title_str="oof_diff")
def transformMultiOneHot(df, col_name, splitter=",", drop_original_col=True):
original_elements_list = df[col_name].dropna().unique()
print(original_elements_list)
duplicate_set = set([col for cols in original_elements_list for col in cols.split(splitter) ])
print(duplicate_set)
for c in duplicate_set:
c_name = f"OH_{col_name}_{c}"
#df[c_name] = 0
df[c_name] = df[col_name].map(lambda x:(1 if c in x.split(splitter) else 0) if pd.isnull(x) == False else 0)
print(df[c_name].value_counts())
if drop_original_col:
df.drop(col_name, axis=1, inplace=True)
return df
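# Sketch of transformMultiOneHot on a toy frame (hypothetical data): each comma-separated
# tag becomes its own OH_ column, with NaN rows encoded as all zeros.
#df_demo = pd.DataFrame({"tags": ["a,b", "b", None, "a,c"]})
#df_demo = transformMultiOneHot(df_demo, "tags", splitter=",")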
def showROC(test_y, pred_y, path_to_save):
# Compute FPR and TPR (and the thresholds)
fpr, tpr, thresholds = roc_curve(test_y, pred_y)
# Also compute the AUC while we are at it
auc_val = auc(fpr, tpr)
# Plot the ROC curve
plt.plot(fpr, tpr, label='ROC curve (area = %.2f)'%auc_val)
plt.legend()
plt.title('ROC curve')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.grid(True)
plt.savefig(path_to_save)
plt.close()
def visualizeComp(df, y_true, y_pred, category_cols=[]):
print("vis")
print(f"df : {df.shape}")
print(f"y_true : {y_true}")
print(f"y_pred : {y_pred}")
target_col = "target"
df["pred"] = y_pred.astype(float)
df[target_col] = y_true.astype(float)
if len(category_cols) == 0:
category_cols = df.select_dtypes(include=['object']).columns
for col in category_cols:
vals = df[col].unique()
for val in vals:
str_title = f"{col}_{val}"
try:
tmp_df = df.loc[df[col]==val, [target_col, "pred"]]
print(tmp_df)
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), "{}_result_comp_{}.png".format(datetime.now().strftime("%Y%m%d_%H%M%S"), str_title))
showROC(test_y=tmp_df[target_col].values, pred_y=tmp_df["pred"].values, path_to_save=path_to_save)
# plt.figure()
# fig2 = sns.lmplot(x=target_col, y="pred", data=tmp_df, palette="Set1")
# #fig.set_axis_labels('target', 'pred')
# plt.title(str_title)
# plt.tight_layout()
# plt.savefig(os.path.join(str(PATH_TO_GRAPH_DIR), "{}_result_comp_{}.png".format(datetime.now().strftime("%Y%m%d_%H%M%S"), str_title)))
# plt.close()
except Exception as e:
print(e)
print("******[error col_val : {}]******".format(str_title))
def test_inter():
cols = ["area", "station", "baba"]
inter_cols = itertools.combinations(cols, 2)
for col in inter_cols:
print(col)
inter_cols = itertools.combinations(cols, 3)
for col in inter_cols:
print(col)
def calcPermutationWeightMean(df_list):
# e_df = df_list[0]["weight"]
# if len(df_list) > 1:
# for df in df_list[1:]:
# e_df += df["weight"]
# mean = e_df/len(df_list)
# #print(e_df)
# print(mean[mean<0])
total_df = pd.DataFrame()
for i, df in enumerate(df_list):
total_df[f"weight_{i}"] = df["weight"]
#print(total_df)
total_df["weight_mean"] = total_df.mean(axis=1)
total_df = total_df.sort_values("weight_mean", ascending=False)
#print(total_df.loc[total_df["weight_mean"]<0])
return total_df
def testpermu():
df0 = pd.read_csv(PATH_TO_FEATURES_DIR/"permutation_feature_imp_fold020200728_003522.csv", index_col=0)
df0 = df0.set_index("feature")
df1 = pd.read_csv(PATH_TO_FEATURES_DIR/"permutation_feature_imp_fold120200728_003957.csv", index_col=0)
df1 = df1.set_index("feature")
idx = "madori"
print(df0.loc[idx])
print(df1.loc[idx])
df_total = df0["weight"] + df1["weight"]
print(df_total.loc[idx])
df2 = | pd.read_csv(PATH_TO_FEATURES_DIR/"permutation_feature_imp_fold220200728_004355.csv", index_col=0) | pandas.read_csv |
# generic libraries
import os
import glob
from xml.etree import ElementTree
import numpy as np
import pandas as pd
from .read_sentinel2 import get_root_of_table
from ..generic.mapping_io import read_geo_image
# dove-C
def list_central_wavelength_dc():
center_wavelength = {"B1": 485., "B2" : 545., "B3" : 630., "B4" : 820.}
full_width_half_max = {"B1": 60., "B2" : 90., "B3" : 80., "B4" : 80.}
gsd = {"B1" : 3., "B2" : 3., "B3" : 3., "B4" : 3.}
bandid = {"B1": 0, "B2" : 1, "B3" : 2, "B4" : 3}
acquisition_order = {"B1": 1, "B2": 1, "B3": 1, "B4": 2}
# along_track_view_angle =
# field_of_view =
common_name = {"B1" : 'blue', "B2" : 'green', "B3" : 'red', "B4" : 'nir'}
d = {
"center_wavelength": pd.Series(center_wavelength,
dtype=np.dtype("float")),
"full_width_half_max": pd.Series(full_width_half_max,
dtype=np.dtype("float")),
"gsd": pd.Series(gsd, dtype=np.dtype("float")),
"common_name": pd.Series(common_name, dtype="string"),
"acquisition_order": pd.Series(acquisition_order,
dtype=np.dtype("int64")),
"bandid": pd.Series(bandid, dtype=np.dtype("int64"))
}
df = pd.DataFrame(d)
return df
# dove-R
def list_central_wavelength_dr():
center_wavelength = {"B1": 485., "B2" : 545., "B3" : 630., "B4" : 820.}
full_width_half_max = {"B1":60., "B2" : 90., "B3" : 80., "B4" : 80.}
gsd = {"B1" : 3., "B2" : 3., "B3" : 3., "B4": 3.}
bandid = {"B1": 0, "B2" : 1, "B3" : 2, "B4" : 3}
# along_track_view_angle =
field_of_view = {"B1": 3., "B2": 3., "B3": 3., "B4": 3.}
acquisition_order = {"B1": 4, "B2": 1, "B3": 2, "B4": 3}
common_name = {"B1" : 'blue', "B2" : 'green', "B3" : 'red', "B4" : 'nir'}
d = {
"center_wavelength": pd.Series(center_wavelength,
dtype=np.dtype("float")),
"full_width_half_max": pd.Series(full_width_half_max,
dtype=np.dtype("float")),
"gsd": pd.Series(gsd, dtype=np.dtype("float")),
"common_name": pd.Series(common_name, dtype="string"),
"field_of_view": pd.Series(field_of_view, dtype=np.dtype("float")),
"acquisition_order": pd.Series(acquisition_order,
dtype=np.dtype("int64")),
"bandid": pd.Series(bandid, dtype=np.dtype("int64"))
}
df = | pd.DataFrame(d) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.data_checks import (
ClassImbalanceDataCheck,
DataCheckError,
DataCheckMessageCode,
DataCheckWarning,
)
class_imbalance_data_check_name = ClassImbalanceDataCheck.name
def test_class_imbalance_errors():
X = pd.DataFrame()
with pytest.raises(ValueError, match="threshold 0 is not within the range"):
ClassImbalanceDataCheck(threshold=0).validate(X, y=pd.Series([0, 1, 1]))
with pytest.raises(ValueError, match="threshold 0.51 is not within the range"):
ClassImbalanceDataCheck(threshold=0.51).validate(X, y=pd.Series([0, 1, 1]))
with pytest.raises(ValueError, match="threshold -0.5 is not within the range"):
ClassImbalanceDataCheck(threshold=-0.5).validate(X, y=pd.Series([0, 1, 1]))
with pytest.raises(ValueError, match="Provided number of CV folds"):
ClassImbalanceDataCheck(num_cv_folds=-1).validate(X, y=pd.Series([0, 1, 1]))
with pytest.raises(ValueError, match="Provided value min_samples"):
ClassImbalanceDataCheck(min_samples=-1).validate(X, y=pd.Series([0, 1, 1]))
@pytest.mark.parametrize("input_type", ["pd", "np", "ww"])
def test_class_imbalance_data_check_binary(input_type):
X = pd.DataFrame()
y = pd.Series([0, 0, 1])
y_long = pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
y_balanced = pd.Series([0, 0, 1, 1])
if input_type == "np":
X = X.to_numpy()
y = y.to_numpy()
y_long = y_long.to_numpy()
y_balanced = y_balanced.to_numpy()
elif input_type == "ww":
X.ww.init()
y = ww.init_series(y)
y_long = ww.init_series(y_long)
y_balanced = ww.init_series(y_balanced)
class_imbalance_check = ClassImbalanceDataCheck(min_samples=1, num_cv_folds=0)
assert class_imbalance_check.validate(X, y) == []
assert class_imbalance_check.validate(X, y_long) == [
DataCheckWarning(
message="The following labels fall below 10% of the target: [0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [0]},
).to_dict()
]
assert ClassImbalanceDataCheck(
threshold=0.25, min_samples=1, num_cv_folds=0
).validate(X, y_long) == [
DataCheckWarning(
message="The following labels fall below 25% of the target: [0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [0]},
).to_dict()
]
class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=1)
assert class_imbalance_check.validate(X, y) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 2 instances: [1]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": [1]},
).to_dict()
]
assert class_imbalance_check.validate(X, y_balanced) == []
class_imbalance_check = ClassImbalanceDataCheck()
assert class_imbalance_check.validate(X, y) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 6 instances: [0, 1]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": [0, 1]},
).to_dict()
]
@pytest.mark.parametrize("input_type", ["pd", "np", "ww"])
def test_class_imbalance_data_check_multiclass(input_type):
X = pd.DataFrame()
y = pd.Series([0, 2, 1, 1])
y_imbalanced_default_threshold = pd.Series([0, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
y_imbalanced_set_threshold = pd.Series(
[0, 2, 2, 2, 2, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
)
y_imbalanced_cv = pd.Series([0, 1, 2, 2, 1, 1, 1])
y_long = pd.Series([0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4])
if input_type == "np":
X = X.to_numpy()
y = y.to_numpy()
y_imbalanced_default_threshold = y_imbalanced_default_threshold.to_numpy()
y_imbalanced_set_threshold = y_imbalanced_set_threshold.to_numpy()
y_imbalanced_cv = y_imbalanced_cv.to_numpy()
y_long = y_long.to_numpy()
elif input_type == "ww":
X.ww.init()
y = ww.init_series(y)
y_imbalanced_default_threshold = ww.init_series(y_imbalanced_default_threshold)
y_imbalanced_set_threshold = ww.init_series(y_imbalanced_set_threshold)
y_imbalanced_cv = ww.init_series(y_imbalanced_cv)
y_long = ww.init_series(y_long)
class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=0)
assert class_imbalance_check.validate(X, y) == []
assert class_imbalance_check.validate(X, y_imbalanced_default_threshold) == [
DataCheckWarning(
message="The following labels fall below 10% of the target: [0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [0]},
).to_dict(),
DataCheckWarning(
message="The following labels in the target have severe class imbalance because they fall under 10% of the target and have less than 100 samples: [0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE,
details={"target_values": [0]},
).to_dict(),
]
assert ClassImbalanceDataCheck(
threshold=0.25, num_cv_folds=0, min_samples=1
).validate(X, y_imbalanced_set_threshold) == [
DataCheckWarning(
message="The following labels fall below 25% of the target: [3, 0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [3, 0]},
).to_dict()
]
class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=2)
assert class_imbalance_check.validate(X, y_imbalanced_cv) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 4 instances: [0, 2]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": [0, 2]},
).to_dict()
]
assert class_imbalance_check.validate(X, y_long) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 4 instances: [0, 1]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": [0, 1]},
).to_dict()
]
class_imbalance_check = ClassImbalanceDataCheck()
assert class_imbalance_check.validate(X, y_long) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 6 instances: [0, 1, 2, 3]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": [0, 1, 2, 3]},
).to_dict()
]
@pytest.mark.parametrize("input_type", ["pd", "np", "ww"])
def test_class_imbalance_empty_and_nan(input_type):
X = pd.DataFrame()
y_empty = pd.Series([])
y_has_nan = pd.Series([np.nan, np.nan, np.nan, np.nan, 1, 1, 1, 1, 2])
if input_type == "np":
X = X.to_numpy()
y_empty = y_empty.to_numpy()
y_has_nan = y_has_nan.to_numpy()
elif input_type == "ww":
X.ww.init()
y_empty = ww.init_series(y_empty)
y_has_nan = ww.init_series(y_has_nan)
class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=0)
assert class_imbalance_check.validate(X, y_empty) == []
assert ClassImbalanceDataCheck(
threshold=0.5, min_samples=1, num_cv_folds=0
).validate(X, y_has_nan) == [
DataCheckWarning(
message="The following labels fall below 50% of the target: [2.0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [2.0]},
).to_dict()
]
assert ClassImbalanceDataCheck(threshold=0.5, num_cv_folds=0).validate(
X, y_has_nan
) == [
DataCheckWarning(
message="The following labels fall below 50% of the target: [2.0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [2.0]},
).to_dict(),
DataCheckWarning(
message="The following labels in the target have severe class imbalance because they fall under 50% of the target and have less than 100 samples: [2.0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE,
details={"target_values": [2.0]},
).to_dict(),
]
class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=1)
assert class_imbalance_check.validate(X, y_empty) == []
assert ClassImbalanceDataCheck(threshold=0.5, num_cv_folds=1).validate(
X, y_has_nan
) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 2 instances: [2.0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": [2.0]},
).to_dict(),
DataCheckWarning(
message="The following labels fall below 50% of the target: [2.0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [2.0]},
).to_dict(),
DataCheckWarning(
message="The following labels in the target have severe class imbalance because they fall under 50% of the target and have less than 100 samples: [2.0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE,
details={"target_values": [2.0]},
).to_dict(),
]
@pytest.mark.parametrize("input_type", ["pd", "ww"])
def test_class_imbalance_nonnumeric(input_type):
X = pd.DataFrame()
y_bools = pd.Series([True, False, False, False, False])
y_binary = pd.Series(["yes", "no", "yes", "yes", "yes"])
y_multiclass = pd.Series(
[
"red",
"green",
"red",
"red",
"blue",
"green",
"red",
"blue",
"green",
"red",
"red",
"red",
]
)
y_multiclass_imbalanced_folds = pd.Series(["No", "Maybe", "Maybe", "No", "Yes"])
y_binary_imbalanced_folds = pd.Series(["No", "Yes", "No", "Yes", "No"])
if input_type == "ww":
X.ww.init()
y_bools = ww.init_series(y_bools)
y_binary = ww.init_series(y_binary)
y_multiclass = ww.init_series(y_multiclass)
class_imbalance_check = ClassImbalanceDataCheck(
threshold=0.25, min_samples=1, num_cv_folds=0
)
assert class_imbalance_check.validate(X, y_bools) == [
DataCheckWarning(
message="The following labels fall below 25% of the target: [True]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [True]},
).to_dict()
]
assert class_imbalance_check.validate(X, y_binary) == [
DataCheckWarning(
message="The following labels fall below 25% of the target: ['no']",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": ["no"]},
).to_dict()
]
assert ClassImbalanceDataCheck(threshold=0.35, num_cv_folds=0).validate(
X, y_multiclass
) == [
DataCheckWarning(
message="The following labels fall below 35% of the target: ['green', 'blue']",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": ["green", "blue"]},
).to_dict(),
DataCheckWarning(
message="The following labels in the target have severe class imbalance because they fall under 35% of the target and have less than 100 samples: ['green', 'blue']",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE,
details={"target_values": ["green", "blue"]},
).to_dict(),
]
class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=1)
assert class_imbalance_check.validate(X, y_multiclass_imbalanced_folds) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 2 instances: ['Yes']",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": ["Yes"]},
).to_dict()
]
assert class_imbalance_check.validate(X, y_multiclass) == []
class_imbalance_check = ClassImbalanceDataCheck()
assert class_imbalance_check.validate(X, y_binary_imbalanced_folds) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 6 instances: ['No', 'Yes']",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": ["No", "Yes"]},
).to_dict()
]
assert class_imbalance_check.validate(X, y_multiclass) == [
DataCheckError(
message="The number of instances of these targets is less than 2 * the number of cross folds = 6 instances: ['blue', 'green']",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": ["blue", "green"]},
).to_dict()
]
@pytest.mark.parametrize("input_type", ["pd", "ww"])
def test_class_imbalance_nonnumeric_balanced(input_type):
X = pd.DataFrame()
y_bools_balanced = pd.Series([True, True, True, False, False])
y_binary_balanced = pd.Series(["No", "Yes", "No", "Yes"])
y_multiclass_balanced = pd.Series(
["red", "green", "red", "red", "blue", "green", "red", "blue", "green", "red"]
)
if input_type == "ww":
X.ww.init()
y_bools_balanced = ww.init_series(y_bools_balanced)
y_binary_balanced = ww.init_series(y_binary_balanced)
y_multiclass_balanced = ww.init_series(y_multiclass_balanced)
class_imbalance_check = ClassImbalanceDataCheck(num_cv_folds=1)
assert class_imbalance_check.validate(X, y_multiclass_balanced) == []
assert class_imbalance_check.validate(X, y_binary_balanced) == []
assert class_imbalance_check.validate(X, y_multiclass_balanced) == []
@pytest.mark.parametrize("input_type", ["pd", "ww"])
@pytest.mark.parametrize("min_samples", [1, 20, 50, 100, 500])
def test_class_imbalance_severe(min_samples, input_type):
X = pd.DataFrame()
# 0 will be < 10% of the data, but there will be 50 samples of it
y_values_binary = pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] * 50)
y_values_multiclass = pd.Series(
[0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] * 50
)
if input_type == "ww":
X.ww.init()
y_values_binary = ww.init_series(y_values_binary)
y_values_multiclass = ww.init_series(y_values_multiclass)
class_imbalance_check = ClassImbalanceDataCheck(
min_samples=min_samples, num_cv_folds=1
)
warnings = [
DataCheckWarning(
message="The following labels fall below 10% of the target: [0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": [0]},
).to_dict()
]
if min_samples > 50:
warnings.append(
DataCheckWarning(
message=f"The following labels in the target have severe class imbalance because they fall under 10% of the target and have less than {min_samples} samples: [0]",
data_check_name=class_imbalance_data_check_name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE,
details={"target_values": [0]},
).to_dict()
)
assert class_imbalance_check.validate(X, y_values_binary) == warnings
assert class_imbalance_check.validate(X, y_values_multiclass) == warnings
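# Illustrative sketch (not one of the original tests): outside of pytest the data
# check is used by instantiating it and calling `validate` on features and target.
def _example_class_imbalance_usage():
    X = pd.DataFrame({"a": range(12)})
    y = pd.Series([0] + [1] * 11)
    check = ClassImbalanceDataCheck(threshold=0.10, min_samples=1, num_cv_folds=0)
    return check.validate(X, y)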
def test_class_imbalance_large_multiclass():
X = pd.DataFrame()
y_values_multiclass_large = pd.Series(
[0] * 20 + [1] * 25 + [2] * 99 + [3] * 105 + [4] * 900 + [5] * 900
)
y_multiclass_huge = pd.Series([i % 200 for i in range(100000)])
y_imbalanced_multiclass_huge = y_multiclass_huge.append(
pd.Series([200] * 10), ignore_index=True
)
y_imbalanced_multiclass_nan = y_multiclass_huge.append(
        pd.Series([np.nan] * 10), ignore_index=True
    )
"""Module providing functions to load and save logs from the *CARWatch* app."""
import json
import re
import warnings
import zipfile
from pathlib import Path
from typing import Dict, Optional, Sequence, Union
import pandas as pd
from tqdm.auto import tqdm
from biopsykit.carwatch_logs import LogData
from biopsykit.utils._datatype_validation_helper import _assert_file_extension
from biopsykit.utils._types import path_t
from biopsykit.utils.time import tz, utc
LOG_FILENAME_PATTERN = "logs_(.*?)"
def load_logs_all_subjects(
base_folder: path_t,
has_subject_folders: Optional[bool] = True,
log_filename_pattern: Optional[str] = None,
return_df: Optional[bool] = True,
) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
"""Load log files from all subjects in a folder.
This function iterates through the base folder and looks for subfolders
(if ``has_subject_folders`` is ``True``), or for .csv files or .zip files matching the log file name pattern.
Files from all subjects are then loaded and returned as one :class:`~pandas.DataFrame`
(if ``return_df`` is ``True``) or a dictionary (if ``return_df`` is ``False``).
Parameters
----------
base_folder : str or :class:`~pathlib.Path`
path to base folder containing log files
has_subject_folders : boolean, optional
``True`` if log files are stored in subfolders per subject, ``False`` if they are all stored in one
top-level folder
log_filename_pattern : str, optional
file name pattern of log files as regex string or ``None`` if files have default filename
pattern: "logs_(.*?)". A custom filename pattern needs to contain a capture group to extract the subject ID
return_df : bool, optional
``True`` to return data from all subjects combined as one dataframe, ``False`` to return a dictionary with
data per subject. Default: ``True``
Returns
-------
:class:`~pandas.DataFrame` or dict
dataframe with log data for all subjects (if ``return_df`` is ``True``).
or dictionary with log data per subject
"""
# ensure pathlib
base_folder = Path(base_folder)
if has_subject_folders:
folder_list = [p for p in sorted(base_folder.glob("*")) if p.is_dir() and not p.name.startswith(".")]
dict_log_files = _load_log_file_folder(folder_list)
else:
# first, look for available csv files
file_list = list(sorted(base_folder.glob("*.csv")))
if len(file_list) > 0:
dict_log_files = _load_log_file_csv(file_list, log_filename_pattern)
else:
# fallback: look for zip files
file_list = list(sorted(base_folder.glob("*.zip")))
dict_log_files = _load_log_file_zip(file_list, log_filename_pattern)
if return_df:
return pd.concat(dict_log_files, names=["subject_id"])
return dict_log_files
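# Minimal usage sketch (not part of the original module; the folder path is a
# placeholder): load all logs either as one combined dataframe indexed by subject_id
# or as a dictionary with one dataframe per subject.
def _example_load_logs(base_folder="data/carwatch_logs"):
    df_all = load_logs_all_subjects(base_folder, has_subject_folders=True)
    dict_per_subject = load_logs_all_subjects(base_folder, has_subject_folders=True, return_df=False)
    return df_all, dict_per_subject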
def _load_log_file_folder(folder_list: Sequence[Path]):
dict_log_files = {}
for folder in tqdm(folder_list):
subject_id = folder.name
dict_log_files[subject_id] = load_log_one_subject(folder)
return dict_log_files
def _load_log_file_csv(file_list: Sequence[Path], log_filename_pattern: str):
dict_log_files = {}
if log_filename_pattern is None:
log_filename_pattern = LOG_FILENAME_PATTERN + ".csv"
for file in tqdm(file_list):
subject_id = re.search(log_filename_pattern, file.name).group(1)
df = pd.read_csv(file, sep=";")
df["time"] = | pd.to_datetime(df["time"]) | pandas.to_datetime |
__author__ = "<NAME>"
__copyright__ = "BMW Group"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from tsa import Logger
import sys
import numpy as np
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
import argparse
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from statsmodels.tsa.seasonal import seasonal_decompose
from copy import copy, deepcopy
from scipy import stats
class UVariateTimeSeriesClass(object):
"""
Uni-variate time series class
Attributes:
_ts_df_cols - internal column names for dataframe that will be input to model
ts_df - time series data frame
freq - frequency of time series, possibilities ['S', 'min', 'H', 'D', 'W', 'M']
p_train - float value defining which part of data is to be used as training data. Note, value of 1.0 would mean
all data will be used as training data,
hence no test data will be generated.
timeformat - time format if time series data needs to be brought into datetime
#
_mode - defines the mode as 'test' or 'forecast'
_train_dt - training data
_test_dt - test data
model_fit - fitted model
fittedvalues - computed fitted values
residuals - residuals
rmse - RMSE on test set (test data and the forecast on test data)
upper_whisker_res - upper whisker for residuals
lower_conf_int - upper confidence interval
upper_conf_int - lower confidence interval
    forecast - computed forecasted values
    residuals_forecast - residuals between forecasted and real values. Note, this variable exists only if test data
    existed
Methods:
ts_transform() - transforms time series using log10 or box-cox
ts_resample() - resamples time series at the chosen frequency freq
_plot_residuals() - residual plots helper function
ts_test() - evaluates fitted model on the test data, if this one has been generated
ts_forecast() - forecasts time series and plots the results
_plot_forecast() - helper function for plotting forecasted time-series
    ts_decompose() - decomposes time series into seasonal, trend and residual components and plots the results
plot_decompose() - plots the results of ts_decompose()
Helper methods:
_prepare_fit() - prepares ts_fit of child class. Supposed to be called by a child class
_residuals() - helper function for calculating residuals. Supposed to be called by a child class
_check_ts_test() - checks for test. Supposed to be called by a child class
_check_ts_forecast() - checks for forecast. Supposed to be called by a child class
"""
def __init__(self, ts_df, time_format="%Y-%m-%d %H:%M:%S", freq='D', p_train=1.0, **kwds):
"""
        Initializes the UVariateTimeSeriesClass object
"""
self._ts_df_cols = ['ds', 'y']
self.ts_df = ts_df
self.time_format = time_format
self.freq = freq
self.p_train = p_train
self.transform = None
self._boxcox_lmbda = None
self._mode = ''
self._train_dt = None
self._test_dt = None
self.model_fit = None
self.fittedvalues = None
self.residuals = None
self.rmse = None
self.upper_whisker_res = None
self.lower_conf_int = None
self.upper_conf_int = None
self.forecast = None
self.residuals_forecast = None
self.seasonal = None
self.trend = None
self.baseline = None
self._uvts_cls_logger = Logger('uvts_cls')
# Assertion Tests
try:
assert self.freq in ['S', 'min', 'H', 'D', 'W', 'M']
except AssertionError:
            self._uvts_cls_logger.warning("freq should be in ['S', 'min', 'H', 'D', 'W', 'M']. "
"Assuming daily frequency!")
self.freq = 'D'
try:
self.p_train = float(self.p_train)
assert self.p_train > 0
except AssertionError:
self._uvts_cls_logger.error("p_train defines part of data on which you would train your model."
"This value cannot be less than or equal to zero!")
self._uvts_cls_logger.exception("Exception occurred, p_train")
except ValueError:
self._uvts_cls_logger.error("p_train must be convertible to float type!")
self._uvts_cls_logger.exception("Exception occurred, p_train")
else:
if int(self.p_train) < 1:
self._mode = 'test'
else:
self._mode = 'forecast'
try:
assert pd.DataFrame(self.ts_df).shape[1] <= 2
except AssertionError:
self._uvts_cls_logger.error(
"Time series must be uni-variate. "
"Hence, at most a time columns and a column of numeric values are expected!")
self._uvts_cls_logger.exception("Exception occurred, ts_df")
else:
self.ts_df = self.ts_df.reset_index()
self.ts_df.columns = self._ts_df_cols
            self.ts_df['y'] = pd.to_numeric(self.ts_df['y'], errors='coerce')
self.ts_df.set_index('ds', inplace=True)
print(type(self._uvts_cls_logger))
print(self._uvts_cls_logger)
self._uvts_cls_logger.info("Using time series data of range: " + str(min(self.ts_df.index)) + ' - ' + str(
max(self.ts_df.index)) + " and shape: " + str(self.ts_df.shape))
if not isinstance(self.ts_df.index, pd.DatetimeIndex):
self._uvts_cls_logger.warning("Time conversion required...")
self.ts_df = self.ts_df.reset_index()
try:
self.ts_df['ds'] = self.ts_df['ds'].apply(
lambda x: datetime.datetime.strptime(
str(x).translate({ord('T'): ' ', ord('Z'): None})[:-1],
self.time_format))
except ValueError as e:
self._uvts_cls_logger.warning("Zulu time conversion not successful: {}".format(e))
self._uvts_cls_logger.warning("Will try without assuming zulu time...")
try:
self.ts_df['ds'] = self.ts_df['ds'].apply(
lambda x: datetime.datetime.strptime(str(x), self.time_format))
except ValueError as e:
self._uvts_cls_logger.info("Time conversion not successful. Check your time_format: {}".format(e))
else:
self._uvts_cls_logger.info("Time conversion successful!")
else:
self._uvts_cls_logger.info("Time conversion successful!")
# set index
self.ts_df.set_index('ds', inplace=True)
#
self.ts_df.index = pd.to_datetime(self.ts_df.index)
self.ts_df.sort_index(inplace=True)
# resample
self.ts_resample()
# delegate
super(UVariateTimeSeriesClass, self).__init__(**kwds)
def __copy__(self):
"""
Copies the object
"""
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
"""
Deepcopies the object
"""
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, deepcopy(v, memo))
return result
def ts_transform(self, transform):
"""
        Transforms the time series by applying the requested 'transform'. Currently 'log10' and 'box-cox' are supported.
"""
try:
assert transform.lower().strip() in ['log10', 'box-cox']
except AssertionError:
self._uvts_cls_logger.error(
"transform should be in ['log10', 'box-cox'] or empty. Assuming no transform! "
"Hence, if you get bad results, you would like maybe to choose e.g., log10 here.")
self._uvts_cls_logger.exception("Assertion exception occurred, transform")
self.transform = None
else:
self.transform = transform.lower()
# transform
if self.transform == 'log10':
try:
self.ts_df['y'] = self.ts_df['y'].apply(np.log10)
except ValueError:
self._uvts_cls_logger.exception("log10 transformation did not work! Possibly negative "
"values present?")
elif self.transform == 'box-cox':
if input("Do you want to provide lambda for box.cox? y/n?").strip().lower() == 'y':
self._boxcox_lmbda = float(input())
else:
self._boxcox_lmbda = None
try:
if self._boxcox_lmbda is None:
bc, lmbda_1 = stats.boxcox(self.ts_df['y'], lmbda=self._boxcox_lmbda)
self.ts_df['y'] = stats.boxcox(self.ts_df['y'], lmbda=lmbda_1)
else:
self.ts_df['y'] = stats.boxcox(self.ts_df['y'], lmbda=self._boxcox_lmbda)
except ValueError:
self._uvts_cls_logger.exception("box-cox transformation did not work! "
"Possibly negative values present or bad lmbda?")
return self
def set_frequency(self, new_freq):
"""
Sets new frequency and resamples time series to that new frequency
"""
try:
assert new_freq in ['S', 'min', 'H', 'D', 'W', 'M']
except AssertionError:
            self._uvts_cls_logger.error("frequency should be in ['S', 'min', 'H', 'D', 'W', 'M']")
else:
self.freq = new_freq
self.ts_resample()
def ts_check_frequency(self):
"""
Checks the frequency of time series
"""
if self.ts_df.index.freq is None:
self._uvts_cls_logger.info("No specific frequency detected.")
self._uvts_cls_logger.info("Frequency chosen in initialization: " + str(
self.freq) + " enter 'n' and call ts_resample() if you are satisfied with this value.")
if input("Should a histogram of time deltas be plotted y/n?").strip().lower() == 'y':
ff = pd.Series(self.ts_df.index[1:(len(self.ts_df))] - self.ts_df.index[0:(len(self.ts_df) - 1)])
ff = ff.apply(lambda x: int(x.total_seconds() / (60 * 60)))
plt.hist(ff, bins=120)
plt.xlabel("Rounded time delta [H]")
plt.ylabel("Frequency of occurrence")
self._uvts_cls_logger.info(ff.value_counts())
self._uvts_cls_logger.info("Should hourly frequency not fit, choose a reasonable frequency and call "
"set_frequency(new_freq)")
else:
pass
else:
self._uvts_cls_logger.info("Time series frequency: " + str(self.ts_df.index.freq))
def ts_resample(self):
"""
Brings original time series to the chosen frequency freq
"""
ts_freq = pd.DataFrame(
index=pd.date_range(self.ts_df.index[0], self.ts_df.index[len(self.ts_df) - 1], freq=self.freq),
columns=['dummy'])
self.ts_df = ts_freq.join(self.ts_df).drop(['dummy'], axis=1)
self.ts_df.y = self.ts_df.y.fillna(method='ffill')
# if np.isnan ( self.ts_df.y ).any ():
# self.ts_df.y = self.ts_df.y.fillna ( method='bfill' )
if np.isnan(self.ts_df.y).any():
self._uvts_cls_logger.warning("Some NaN found, something went wrong, check the data!")
sys.exit(-1)
self._uvts_cls_logger.info("Time series resampled at frequency: " + str(self.ts_df.index.freq) +
". New shape of the data: " + str(self.ts_df.shape))
return self
def _prepare_fit(self):
"""
Prepares data for training or forecasting modes
"""
if self.ts_df.index.freq is None:
self._uvts_cls_logger.warning("Time series exhibit no frequency. Calling ts_resample()...")
try:
self.ts_resample()
except ValueError:
self._uvts_cls_logger.error("Resample did not work! Error:" + str(sys.exc_info()[0]))
sys.exit("STOP")
ts_df = self.ts_df
ts_test_df = pd.DataFrame()
if self._mode == 'forecast' or int(self.p_train) == 1:
self._train_dt = ts_df
self._test_dt = ts_test_df
elif self._mode == 'test' and int(self.p_train) < 1:
# split
ts_df = ts_df.reset_index()
ts_df.columns = self._ts_df_cols
ts_test_df = ts_df
# training
ts_df = pd.DataFrame(ts_df.loc[:int(self.p_train * len(ts_df) - 1), ])
ts_df.set_index('ds', inplace=True)
# test
ts_test_df = pd.DataFrame(ts_test_df.loc[int(self.p_train * len(ts_test_df)):, ])
ts_test_df.set_index('ds', inplace=True)
# now set
self._train_dt = ts_df
if not ts_test_df.empty:
self._test_dt = ts_test_df
return self
def _residuals(self):
"""
Calculate residuals
"""
if self.model_fit is None:
self._uvts_cls_logger.error("No model has been fitted, residuals cannot be computed!")
sys.exit("STOP")
try:
# use fittedvalues to fill in the model dictionary
self.residuals = pd.Series(np.asarray(self._train_dt['y']) - np.asarray(self.fittedvalues).flatten(),
index=self._train_dt['y'].index)
self.upper_whisker_res = self.residuals.mean() + 1.5 * (
self.residuals.quantile(0.75) - self.residuals.quantile(0.25))
except (KeyError, AttributeError):
self._uvts_cls_logger.exception("Exception occurred: Model was not fitted or ts has other structure")
return self
def _plot_residuals(self, y, yhat, _id):
"""
Plot the residuals
"""
try:
assert self.model_fit is not None
except AssertionError:
self._uvts_cls_logger.exception("Model has to be fitted first! Please call ts_fit(...)")
fig, axes = plt.subplots(2, 1, figsize=(20, 5), sharex=True)
axes[0].plot(pd.Series(yhat, index=self._train_dt.index), color='y', linewidth=2.0)
axes[0].plot(pd.Series(y, index=self._train_dt.index), color='b')
axes[0].set_ylabel("Model Fit")
axes[0].set_title("Real (blue) and estimated values, " + str(_id))
#
axes[1].plot(self.residuals, color="r")
if self.forecast is not None and self.residuals_forecast is None \
and self.lower_conf_int is not None and self.upper_conf_int is not None:
axes[0].fill_between(self.lower_conf_int.index, self.lower_conf_int, self.upper_conf_int, color='k',
alpha=.15)
if self.upper_whisker_res is not None:
axes[1].axhline(y=self.upper_whisker_res, xmin=0, xmax=1, color='m', label='upper_whisker', linestyle='--',
linewidth=1.5)
axes[1].axhline(y=-self.upper_whisker_res, xmin=0, xmax=1, color='m', label='upper_whisker', linestyle='--',
linewidth=1.5)
axes[1].set_ylabel('Residuals')
axes[1].set_title('Difference between model output and the real data and +/- upper whisker, ' + str(_id))
return fig, axes
def _check_ts_test(self):
"""
        Check before ts_test in the child class is called
"""
try:
assert self.model_fit is not None
except AssertionError:
self._uvts_cls_logger.exception("Model has to be fitted first! Please call ts_fit(...)")
try:
assert self._test_dt is not None
except(KeyError, AssertionError):
self._uvts_cls_logger.exception("Nothing to validate. "
"Call ts_forecast() or specify amount of training data "
"when initializing the object.")
return -1
else:
self._mode = 'test'
return 0
def _check_ts_forecast(self, n_forecast):
"""
Check before ts_forecast in child class is called
"""
#
try:
n_forecast = int(n_forecast)
assert 0 < n_forecast < len(self._train_dt)
except AssertionError:
self._uvts_cls_logger.exception("Number of periods to be forecasted is too low, too high or not numeric!")
except ValueError:
self._uvts_cls_logger.exception("n_forecast must be convertible to int type!")
try:
assert self.model_fit is not None
except AssertionError:
self._uvts_cls_logger.exception("Model has to be fitted first! Please call ts_fit(...)")
return n_forecast
def _gen_idx_future(self, n_forecast):
idx_future = None
if self.freq == 'S':
idx_future = pd.date_range(start=max(self._train_dt.index),
end=max(self._train_dt.index) + datetime.timedelta(
seconds=n_forecast - 1), freq='S')
elif self.freq == 'min':
idx_future = pd.date_range(start=max(self._train_dt.index),
end=max(self._train_dt.index) + datetime.timedelta(
minutes=n_forecast - 1), freq='min')
elif self.freq == 'H':
idx_future = pd.date_range(start=max(self._train_dt.index),
end=max(self._train_dt.index) + datetime.timedelta(
hours=n_forecast - 1), freq='H')
elif self.freq == 'D':
idx_future = pd.date_range(start=max(self._train_dt.index),
end=max(self._train_dt.index) + datetime.timedelta(
days=n_forecast - 1), freq='D')
elif self.freq == 'W':
idx_future = pd.date_range(start=max(self._train_dt.index),
end=max(self._train_dt.index) + datetime.timedelta(
weeks=n_forecast - 1), freq='W')
elif self.freq == 'M':
idx_future = pd.date_range(start=max(self._train_dt.index),
end=max(self._train_dt.index) + relativedelta(months=+(n_forecast - 1)),
freq='M')
return idx_future
def _plot_forecast(self, y, yhat, forecast, _id):
"""
Plot forecasted values
"""
try:
assert self.model_fit is not None
except AssertionError:
self._uvts_cls_logger.exception("Model has to be fitted first! Please call ts_fit(...)")
sys.exit("STOP")
#
try:
assert self.forecast is not None
except AssertionError:
self._uvts_cls_logger.exception("Neither ts_test(...) nor ts_forecast(...) have been called yet!")
sys.exit("STOP")
fig, axes = plt.subplots(2, 1, figsize=(20, 7), sharex=True)
#
axes[0].plot(pd.Series(yhat, index=self._train_dt.index), color='y', linewidth=2.0)
axes[0].plot(pd.Series(y, index=self._train_dt.index), color='b')
#
if self.residuals_forecast is not None:
axes[0].plot(self.ts_df, color='b')
axes[0].plot(forecast, color='darkgreen')
#
if self.lower_conf_int is not None and self.upper_conf_int is not None:
axes[0].fill_between(self.lower_conf_int.index,
self.lower_conf_int,
self.upper_conf_int,
color='k', alpha=.15)
axes[0].set_ylabel("Fit and Forecast/Validation")
axes[0].set_title("Real (blue), estimated (yellow) and forecasted values, " + str(_id))
#
if self.residuals_forecast is not None:
            axes[1].plot(pd.concat([self.residuals, self.residuals_forecast], axis=0))
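# Usage sketch (illustrative, not part of the original class): construct the class on
# a single-column dataframe indexed by timestamps; the transform call is optional.
def _example_uvts(df):
    ts = UVariateTimeSeriesClass(ts_df=df, time_format="%Y-%m-%d %H:%M:%S", freq='D', p_train=0.8)
    ts.ts_transform('log10')
    return ts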
import os
import geopandas as gpd
import numpy as np
import pandas as pd
from subprocess import call
from shapely.geometry import Point
from sklearn.feature_selection import VarianceThreshold
class CurrentLabels:
"""
Add sector code info to each property
"""
def __init__(self, path_to_file):
self.df = pd.read_csv(path_to_file, dtype='str')
def adjust_nas(self):
self.df = (self.df
.fillna(value={'model_decision': 'NA_string',
'analyst_decision': 'NA_string'})
.dropna(subset=['coordinates']).reset_index(drop=True)
)
def create_long_lant_cols(self):
self.df['long'] = pd.to_numeric(self.df.coordinates.str.split(',', expand=True).loc[:,0].str.replace('\(', ''))
self.df['lat'] = pd.to_numeric(self.df.coordinates.str.split(',', expand=True).loc[:,1].str.replace('\)', ''))
self.df['state'] = self.df.concat.apply(lambda row: row.split(',')[-1].lower().strip())
self.df['coordinate_point'] = pd.Series([], dtype='object')
for idx, row in self.df.iterrows():
self.df.loc[idx, 'coordinate_point'] = Point(row.long, row.lat)
def drop_cols(self):
self.df = self.df.drop(columns=['zip_code', 'coordinates', 'Unnamed: 0'])
def join_sector_code(self):
def join_code_sector_inner(df):
            assert len(df.state.unique()) == 1, ('More than one state present in the dataset')
state = df.state.unique()[0]
inner_df = df.copy()
if state in os.listdir('data/sharp'):
file_name = [file for file in os.listdir('data/sharp/'+state) if file.find('.shp')>0][0]
census_sector = gpd.read_file('data/sharp/{0:s}/{1:s}'.format(state, file_name), encoding='latin1')
inner_df['census_code'] = inner_df['coordinate_point'].apply(lambda row: census_sector.loc[census_sector.contains(row), 'CD_GEOCODI'].values).str[0]
else :
inner_df['census_code'] = np.nan
return inner_df
self.df = (self.df
.assign(state_index=lambda x: x.state)
.groupby('state_index')
.apply(lambda df: join_code_sector_inner(df))
.reset_index(drop=True)
)
def save_df(self, path_to_save='data/procesada/data_with_index.pkl'):
self.df.to_pickle(path_to_save)
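# Usage sketch (illustrative; the input CSV path is a placeholder): the classes in this
# module form a small pipeline and CurrentLabels is the first stage.
def _example_current_labels(path_to_file="data/raw/properties.csv"):
    labels = CurrentLabels(path_to_file)
    labels.adjust_nas()
    labels.create_long_lant_cols()
    labels.drop_cols()
    labels.join_sector_code()
    labels.save_df()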
class DataWithDups:
"""
    Remove same-address duplicates and unify previous model and analyst decisions
"""
def __init__(self, path_to_file='data/procesada/data_with_index.pkl'):
self.df = pd.read_pickle(path_to_file)
def drop_nas_in_sector(self):
self.df = self.df.dropna(subset=['census_code'])
def print_dups(self):
        print('{0:.1%} of the dataset rows are duplicated'
.format(self.df
.duplicated(subset=['lat', 'long', 'concat'], keep=False)
.mean())
)
def unify_decision(self):
self.df = (self.df
.assign(final_decision=lambda x: np.where(x.analyst_decision.isin(['A', 'R']),
x.analyst_decision,
np.where(x.model_decision.isin(['A', 'R']),
x.model_decision,
'undefined')))
.drop(columns=['model_decision', 'analyst_decision'])
)
def remove_duplicates(self):
self.df = (self.df
.assign(uno=1)
.groupby(['state','census_code', 'concat', 'lat', 'long','final_decision'])
.agg(count=('uno', sum))
.reset_index()
.assign(random_index=lambda x: np.random.normal(size=x.shape[0]))
.sort_values(by=['state', 'concat', 'lat', 'long','count', 'random_index'], ascending=False)
.drop_duplicates(subset=['census_code', 'concat', 'state', 'lat', 'long'], keep='first')
.drop(columns=['count', 'random_index'])
.reset_index(drop=True)
)
def save_df(self, path_to_save='data/procesada/data_with_index_nodups.pkl'):
self.df.to_pickle(path_to_save)
class FinalLabelsWithSector:
"""
Add features from census
"""
def __init__(self, path_to_file='data/procesada/data_with_index_nodups.pkl'):
self.df = pd.read_pickle(path_to_file)
self.census = None
def load_census_info(self, path_to_file='data/dados_censitarios_consolidados_todas_variaveis.csv'):
self.census = pd.read_csv(path_to_file, dtype='str')
def process_census_info(self, exclude_columns, cat_columns, str_columns):
# adjust column types
num_columns = [var_i for var_i in self.census.columns if var_i not in cat_columns + str_columns]
for cat_i in cat_columns:
self.census[cat_i] = self.census[cat_i].astype('category')
for num_i in num_columns:
self.census[num_i] = pd.to_numeric(self.census[num_i].str.replace(',', '.'), errors='coerce')
# drop excluded columns
self.census = self.census.drop(columns=exclude_columns)
# hot encoding category columns
        self.census = pd.get_dummies(self.census, columns=cat_columns)
import numpy as np
import pandas as pd
import os, time, sys, multiprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
sys.path.append("..")
from CTGCN.utils import check_and_make_path
class DataGenerator(object):
base_path: str
input_base_path: str
output_base_path: str
full_node_list: list
node_num: int
test_ratio: float
val_ratio: float
def __init__(self, base_path, input_folder, output_folder, node_file, test_ratio=0.3, val_ratio=0.2):
self.base_path = base_path
self.input_base_path = os.path.join(base_path, input_folder)
self.output_base_path = os.path.join(base_path, output_folder)
nodes_set = pd.read_csv(os.path.join(base_path, node_file), names=['node'])
self.full_node_list = nodes_set['node'].tolist()
self.node_num = len(self.full_node_list)
assert test_ratio + val_ratio < 1.0
self.test_ratio = test_ratio
self.val_ratio = val_ratio
check_and_make_path(self.input_base_path)
check_and_make_path(self.output_base_path)
return
def get_neg_edge_samples(self, pos_edges, edge_num, all_edge_dict):
neg_edge_dict = dict()
neg_edge_list = []
cnt = 0
while cnt < edge_num:
from_id = np.random.choice(self.node_num)
to_id = np.random.choice(self.node_num)
if from_id == to_id:
continue
if (from_id, to_id) in all_edge_dict or (to_id, from_id) in all_edge_dict:
continue
if (from_id, to_id) in neg_edge_dict or (to_id, from_id) in neg_edge_dict:
continue
neg_edge_list.append([from_id, to_id, 0])
cnt += 1
neg_edges = np.array(neg_edge_list)
all_edges = np.vstack([pos_edges, neg_edges])
return all_edges
def generate_edge_samples(self):
print('Start generating edge samples!')
node2idx_dict = dict(zip(self.full_node_list, np.arange(self.node_num).tolist()))
f_list = sorted(os.listdir(self.input_base_path))
for i, file in enumerate(f_list):
# print('i = ', i, ', file = ', file)
file_path = os.path.join(self.input_base_path, file)
date = file.split('.')[0]
all_edge_dict = dict()
edge_list = []
with open(file_path, 'r') as fp:
content_list = fp.readlines()
for line in content_list[1:]:
edge = line.strip().split('\t')
from_id = node2idx_dict[edge[0]]
to_id = node2idx_dict[edge[1]]
key = (from_id, to_id)
all_edge_dict[key] = 1
edge_list.append([from_id, to_id, 1])
key = (to_id, from_id)
all_edge_dict[key] = 1
edge_list.append([to_id, from_id, 1])
edges = np.array(edge_list)
del edge_list
edge_num = edges.shape[0]
all_edge_idxs = np.arange(edge_num)
np.random.shuffle(all_edge_idxs)
test_num = int(np.floor(edge_num * self.test_ratio))
val_num = int(np.floor(edge_num * self.val_ratio))
train_num = edge_num - test_num - val_num
val_edges = edges[all_edge_idxs[ : val_num]]
test_edges = edges[all_edge_idxs[val_num : val_num + test_num]]
train_edges = edges[all_edge_idxs[val_num + test_num : ]]
del edges
train_edges = self.get_neg_edge_samples(train_edges, train_num, all_edge_dict)
test_edges = self.get_neg_edge_samples(test_edges, test_num, all_edge_dict)
val_edges = self.get_neg_edge_samples(val_edges, val_num, all_edge_dict)
train_output_path = os.path.join(self.output_base_path, date + '_train.csv')
df_train = pd.DataFrame(train_edges, columns=['from_id', 'to_id', 'label'])
df_train.to_csv(train_output_path, sep='\t', index=False)
test_output_path = os.path.join(self.output_base_path, date + '_test.csv')
df_test = pd.DataFrame(test_edges, columns=['from_id', 'to_id', 'label'])
df_test.to_csv(test_output_path, sep='\t', index=False)
val_output_path = os.path.join(self.output_base_path, date + '_val.csv')
df_val = pd.DataFrame(val_edges, columns=['from_id', 'to_id', 'label'])
df_val.to_csv(val_output_path, sep='\t', index=False)
        print('Finish generating edge samples!')
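# Usage sketch (illustrative; folder and file names are placeholders): generate the
# train/val/test edge samples for every graph snapshot found in the input folder.
def _example_generate_edge_samples():
    generator = DataGenerator(
        base_path='../../data/example', input_folder='1.format',
        output_folder='link_prediction_data', node_file='nodes_set/nodes.csv',
        test_ratio=0.3, val_ratio=0.2,
    )
    generator.generate_edge_samples()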
class LinkPredictor(object):
base_path: str
    origin_base_path: str
embedding_base_path: str
lp_edge_base_path: str
output_base_path: str
full_node_list: list
train_ratio: float
test_ratio: float
def __init__(self, base_path, origin_folder, embedding_folder, lp_edge_folder, output_folder, node_file, train_ratio=1.0, test_ratio=1.0):
self.base_path = base_path
self.origin_base_path = os.path.join(base_path, origin_folder)
self.embedding_base_path = os.path.join(base_path, embedding_folder)
self.lp_edge_base_path = os.path.join(base_path, lp_edge_folder)
self.output_base_path = os.path.join(base_path, output_folder)
self.train_ratio = train_ratio
self.test_ratio = test_ratio
nodes_set = pd.read_csv(os.path.join(base_path, node_file), names=['node'])
self.full_node_list = nodes_set['node'].tolist()
check_and_make_path(self.embedding_base_path)
check_and_make_path(self.origin_base_path)
check_and_make_path(self.output_base_path)
return
def get_edge_feature(self, edge_arr, embedding_arr):
avg_edges, had_edges, l1_edges, l2_edges = [], [], [], []
for i, edge in enumerate(edge_arr):
from_id, to_id = edge[0], edge[1]
avg_edges.append((embedding_arr[from_id] + embedding_arr[to_id]) / 2)
had_edges.append(embedding_arr[from_id] * embedding_arr[to_id])
l1_edges.append(np.abs(embedding_arr[from_id] - embedding_arr[to_id]))
l2_edges.append((embedding_arr[from_id] - embedding_arr[to_id]) ** 2)
avg_edges = np.array(avg_edges)
had_edges = np.array(had_edges)
l1_edges = np.array(l1_edges)
l2_edges = np.array(l2_edges)
feature_dict = {'Avg': avg_edges, 'Had': had_edges, 'L1': l1_edges, 'L2': l2_edges}
return feature_dict
def train(self, train_edges, val_edges, embeddings):
#print('Start training!')
train_edge_num = train_edges.shape[0]
if self.train_ratio < 1.0:
# print('train ratio < 1. 0')
            sample_num = int(train_edge_num * self.train_ratio)
sampled_idxs = np.random.choice(np.arange(train_edge_num), sample_num).tolist()
train_edges = train_edges[sampled_idxs, :]
train_labels = train_edges[:, 2]
val_labels = val_edges[:, 2]
train_feature_dict = self.get_edge_feature(train_edges, embeddings)
val_feature_dict = self.get_edge_feature(val_edges, embeddings)
measure_list = ['Avg', 'Had', 'L1', 'L2']
model_dict = dict()
for measure in measure_list:
models = []
for C in [0.01, 0.1, 1, 10]:
model = LogisticRegression(C=C, solver='lbfgs', max_iter=5000, class_weight='balanced')
model.fit(train_feature_dict[measure], train_labels)
models.append(model)
best_auc = 0
model_idx = -1
for i, model in enumerate(models):
val_pred = model.predict_proba(val_feature_dict[measure])[:, 1]
auc = roc_auc_score(val_labels, val_pred)
if auc >= best_auc:
best_auc = auc
model_idx = i
#print('model_idx = ', model_idx, ', best_auc=', best_auc)
model_dict[measure] = models[model_idx]
#print('Finish training!')
return model_dict
def test(self, test_edges, embeddings, model_dict, date):
#print('Start testing!')
test_edge_num = test_edges.shape[0]
if self.test_ratio < 1.0:
# print('test ratio < 1. 0')
            sample_num = int(test_edge_num * self.test_ratio)
sampled_idxs = np.random.choice(np.arange(test_edge_num), sample_num).tolist()
test_edges = test_edges[sampled_idxs, :]
test_labels = test_edges[:, 2]
test_feature_dict = self.get_edge_feature(test_edges, embeddings)
auc_list = [date]
measure_list = ['Avg', 'Had', 'L1', 'L2']
for measure in measure_list:
test_pred = model_dict[measure].predict_proba(test_feature_dict[measure])[:, 1]
auc_list.append(roc_auc_score(test_labels, test_pred))
#print('Finish testing!')
return auc_list
def link_prediction_all_time(self, method):
print('method = ', method)
f_list = sorted(os.listdir(self.origin_base_path))
f_num = len(f_list)
# model_dict = dict()
all_auc_list = []
for i, f_name in enumerate(f_list):
if i == 0:
continue
print('Current date is: {}'.format(f_name))
date = f_name.split('.')[0]
train_edges = pd.read_csv(os.path.join(self.lp_edge_base_path, date + '_train.csv'), sep='\t').values
val_edges = pd.read_csv(os.path.join(self.lp_edge_base_path, date + '_val.csv'), sep='\t').values
test_edges = pd.read_csv(os.path.join(self.lp_edge_base_path, date + '_test.csv'), sep='\t').values
pre_f_name = f_list[i - 1]
#print('pre_f_name: ', f_list[i - 1], ', f_name: ', f_name)
if not os.path.exists(os.path.join(self.embedding_base_path, method, pre_f_name)):
continue
df_embedding = pd.read_csv(os.path.join(self.embedding_base_path, method, pre_f_name), sep='\t', index_col=0)
df_embedding = df_embedding.loc[self.full_node_list]
# node_num = len(self.full_node_list)
# for j in range(node_num):
# assert df_embedding.index[j] == self.full_node_list[j]
embeddings = df_embedding.values
#print('YES')
model_dict = self.train(train_edges, val_edges, embeddings)
auc_list = self.test(test_edges, embeddings, model_dict, date)
all_auc_list.append(auc_list)
df_output = pd.DataFrame(all_auc_list, columns=['date', 'Avg', 'Had', 'L1', 'L2'])
print(df_output)
print('method = ', method, ', average AUC of Had: ', df_output['Had'].mean())
output_file_path = os.path.join(self.output_base_path, method + '_auc_record.csv')
df_output.to_csv(output_file_path, sep=',', index=False)
def link_prediction_all_method(self, method_list=None, worker=-1):
print('Start link prediction!')
if method_list is None:
method_list = os.listdir(self.embedding_base_path)
if worker <= 0:
for method in method_list:
print('Current method is :{}'.format(method))
self.link_prediction_all_time(method)
else:
worker = min(worker, os.cpu_count())
pool = multiprocessing.Pool(processes=worker)
print("\tstart " + str(worker) + " worker(s)")
for method in method_list:
pool.apply_async(self.link_prediction_all_time, (method,))
pool.close()
pool.join()
print('Finish link prediction!')
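# Usage sketch (illustrative; folder names and the method list are placeholders):
# evaluate previously stored embeddings on the generated edge samples.
def _example_link_prediction():
    predictor = LinkPredictor(
        base_path='../../data/example', origin_folder='1.format',
        embedding_folder='2.embedding', lp_edge_folder='link_prediction_data',
        output_folder='link_prediction_res', node_file='nodes_set/nodes.csv',
    )
    predictor.link_prediction_all_method(method_list=['CTGCN'], worker=-1)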
def process_result(dataset, rep_num, method_list):
for method in method_list:
base_path = os.path.join('../../data/' + dataset, 'link_prediction_res_0')
res_path = os.path.join(base_path, method + '_auc_record.csv')
        df_method = pd.read_csv(res_path, sep=',', header=0, names=['date', 'avg0', 'had0', 'l1_0', 'l2_0'])
import types
import warnings
import pickle
import re
from copy import deepcopy
from functools import partial, wraps
from inspect import signature
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
import joblib
from . import IS_PYPY
from .. import config_context
from ._testing import _get_args
from ._testing import assert_raise_message
from ._testing import assert_array_equal
from ._testing import assert_array_almost_equal
from ._testing import assert_allclose
from ._testing import assert_allclose_dense_sparse
from ._testing import assert_array_less
from ._testing import set_random_state
from ._testing import SkipTest
from ._testing import ignore_warnings
from ._testing import create_memmap_backed_data
from ._testing import raises
from . import is_scalar_nan
from ..linear_model import LinearRegression
from ..linear_model import LogisticRegression
from ..linear_model import RANSACRegressor
from ..linear_model import Ridge
from ..base import (
clone,
ClusterMixin,
is_classifier,
is_regressor,
is_outlier_detector,
RegressorMixin,
_is_pairwise,
)
from ..metrics import accuracy_score, adjusted_rand_score, f1_score
from ..random_projection import BaseRandomProjection
from ..feature_selection import SelectKBest
from ..pipeline import make_pipeline
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..exceptions import SkipTestWarning
from ..model_selection import train_test_split
from ..model_selection import ShuffleSplit
from ..model_selection._validation import _safe_split
from ..metrics.pairwise import rbf_kernel, linear_kernel, pairwise_distances
from ..utils.fixes import threadpool_info
from ..utils.validation import check_is_fitted
from . import shuffle
from ._tags import (
_DEFAULT_TAGS,
_safe_tags,
)
from .validation import has_fit_parameter, _num_samples
from ..preprocessing import StandardScaler
from ..preprocessing import scale
from ..datasets import (
load_iris,
make_blobs,
make_multilabel_classification,
make_regression,
)
REGRESSION_DATASET = None
CROSS_DECOMPOSITION = ["PLSCanonical", "PLSRegression", "CCA", "PLSSVD"]
def _yield_checks(estimator):
name = estimator.__class__.__name__
tags = _safe_tags(estimator)
pairwise = _is_pairwise(estimator)
yield check_no_attributes_set_in_init
yield check_estimators_dtypes
yield check_fit_score_takes_y
if has_fit_parameter(estimator, "sample_weight"):
yield check_sample_weights_pandas_series
yield check_sample_weights_not_an_array
yield check_sample_weights_list
if not pairwise:
# We skip pairwise because the data is not pairwise
yield check_sample_weights_shape
yield check_sample_weights_not_overwritten
yield partial(check_sample_weights_invariance, kind="ones")
yield partial(check_sample_weights_invariance, kind="zeros")
yield check_estimators_fit_returns_self
yield partial(check_estimators_fit_returns_self, readonly_memmap=True)
    # Check that all estimators yield informative messages when
# trained on empty datasets
if not tags["no_validation"]:
yield check_complex_data
yield check_dtype_object
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION:
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if not tags["allow_nan"] and not tags["no_validation"]:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if pairwise:
# Check that pairwise estimator throws error on non-square input
yield check_nonsquare_error
yield check_estimators_overwrite_params
if hasattr(estimator, "sparsify"):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
yield check_estimator_get_tags_default_keys
def _yield_classifier_checks(classifier):
tags = _safe_tags(classifier)
# test classifiers can handle non-array data and pandas objects
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
if tags["multioutput"]:
yield check_classifier_multioutput
# basic consistency testing
yield check_classifiers_train
yield partial(check_classifiers_train, readonly_memmap=True)
yield partial(check_classifiers_train, readonly_memmap=True, X_dtype="float32")
yield check_classifiers_regression_target
if tags["multilabel"]:
yield check_classifiers_multilabel_representation_invariance
yield check_classifiers_multilabel_output_format_predict
yield check_classifiers_multilabel_output_format_predict_proba
yield check_classifiers_multilabel_output_format_decision_function
if not tags["no_validation"]:
yield check_supervised_y_no_nan
if not tags["multioutput_only"]:
yield check_supervised_y_2d
if tags["requires_fit"]:
yield check_estimators_unfitted
if "class_weight" in classifier.get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
# test if predict_proba is a monotonic transformation of decision_function
yield check_decision_proba_consistency
@ignore_warnings(category=FutureWarning)
def check_supervised_y_no_nan(name, estimator_orig):
# Checks that the Estimator targets are not NaN.
estimator = clone(estimator_orig)
rng = np.random.RandomState(888)
X = rng.standard_normal(size=(10, 5))
for value in [np.nan, np.inf]:
y = np.full(10, value)
y = _enforce_estimator_tags_y(estimator, y)
module_name = estimator.__module__
if module_name.startswith("sklearn.") and not (
"test_" in module_name or module_name.endswith("_testing")
):
# In scikit-learn we want the error message to mention the input
# name and be specific about the kind of unexpected value.
if np.isinf(value):
match = (
r"Input (y|Y) contains infinity or a value too large for"
r" dtype\('float64'\)."
)
else:
match = r"Input (y|Y) contains NaN."
else:
# Do not impose a particular error message to third-party libraries.
match = None
err_msg = (
f"Estimator {name} should have raised error on fitting array y with inf"
" value."
)
with raises(ValueError, match=match, err_msg=err_msg):
estimator.fit(X, y)
def _yield_regressor_checks(regressor):
tags = _safe_tags(regressor)
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield partial(check_regressors_train, readonly_memmap=True)
yield partial(check_regressors_train, readonly_memmap=True, X_dtype="float32")
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
if tags["multioutput"]:
yield check_regressor_multioutput
yield check_regressors_no_decision_function
if not tags["no_validation"] and not tags["multioutput_only"]:
yield check_supervised_y_2d
yield check_supervised_y_no_nan
name = regressor.__class__.__name__
if name != "CCA":
# check that the regressor handles int input
yield check_regressors_int
if tags["requires_fit"]:
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(transformer):
tags = _safe_tags(transformer)
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if not tags["no_validation"]:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
yield check_transformer_general
if tags["preserves_dtype"]:
yield check_transformer_preserve_dtypes
yield partial(check_transformer_general, readonly_memmap=True)
if not _safe_tags(transformer, key="stateless"):
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = [
"Isomap",
"KernelPCA",
"LocallyLinearEmbedding",
"RandomizedLasso",
"LogisticRegressionCV",
]
name = transformer.__class__.__name__
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(clusterer):
yield check_clusterer_compute_labels_predict
name = clusterer.__class__.__name__
if name not in ("WardAgglomeration", "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield partial(check_clustering, readonly_memmap=True)
yield check_estimators_partial_fit_n_features
yield check_non_transformer_estimators_n_iter
def _yield_outliers_checks(estimator):
# checks for outlier detectors that have a fit_predict method
if hasattr(estimator, "fit_predict"):
yield check_outliers_fit_predict
# checks for estimators that can be used on a test set
if hasattr(estimator, "predict"):
yield check_outliers_train
yield partial(check_outliers_train, readonly_memmap=True)
# test outlier detectors can handle non-array data
yield check_classifier_data_not_an_array
# test if NotFittedError is raised
if _safe_tags(estimator, key="requires_fit"):
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_all_checks(estimator):
name = estimator.__class__.__name__
tags = _safe_tags(estimator)
if "2darray" not in tags["X_types"]:
warnings.warn(
"Can't test estimator {} which requires input of type {}".format(
name, tags["X_types"]
),
SkipTestWarning,
)
return
if tags["_skip_test"]:
warnings.warn(
"Explicit SKIP via _skip_test tag for estimator {}.".format(name),
SkipTestWarning,
)
return
for check in _yield_checks(estimator):
yield check
if is_classifier(estimator):
for check in _yield_classifier_checks(estimator):
yield check
if is_regressor(estimator):
for check in _yield_regressor_checks(estimator):
yield check
if hasattr(estimator, "transform"):
for check in _yield_transformer_checks(estimator):
yield check
if isinstance(estimator, ClusterMixin):
for check in _yield_clustering_checks(estimator):
yield check
if is_outlier_detector(estimator):
for check in _yield_outliers_checks(estimator):
yield check
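    # The remaining checks are generic and apply to all estimators,
    # independently of the estimator type handled above.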
yield check_parameters_default_constructible
yield check_methods_sample_order_invariance
yield check_methods_subset_invariance
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_get_params_invariance
yield check_set_params
yield check_dict_unchanged
yield check_dont_overwrite_parameters
yield check_fit_idempotent
yield check_fit_check_is_fitted
if not tags["no_validation"]:
yield check_n_features_in
yield check_fit1d
yield check_fit2d_predict1d
if tags["requires_y"]:
yield check_requires_y_none
if tags["requires_positive_X"]:
yield check_fit_non_negative
def _get_check_estimator_ids(obj):
"""Create pytest ids for checks.
When `obj` is an estimator, this returns the pprint version of the
estimator (with `print_changed_only=True`). When `obj` is a function, the
name of the function is returned with its keyword arguments.
`_get_check_estimator_ids` is designed to be used as the `id` in
`pytest.mark.parametrize` where `check_estimator(..., generate_only=True)`
is yielding estimators and checks.
Parameters
----------
obj : estimator or function
Items generated by `check_estimator`.
Returns
-------
id : str or None
See Also
--------
check_estimator
"""
if callable(obj):
if not isinstance(obj, partial):
return obj.__name__
if not obj.keywords:
return obj.func.__name__
kwstring = ",".join(["{}={}".format(k, v) for k, v in obj.keywords.items()])
return "{}({})".format(obj.func.__name__, kwstring)
if hasattr(obj, "get_params"):
with config_context(print_changed_only=True):
return re.sub(r"\s", "", str(obj))
def _construct_instance(Estimator):
"""Construct Estimator instance if possible."""
required_parameters = getattr(Estimator, "_required_parameters", [])
if len(required_parameters):
if required_parameters in (["estimator"], ["base_estimator"]):
# `RANSACRegressor` will raise an error with any model other
# than `LinearRegression` if we don't fix `min_samples` parameter.
            # For the common tests we can enforce using `LinearRegression`,
            # which is the default estimator of `RANSACRegressor`, instead of `Ridge`.
if issubclass(Estimator, RANSACRegressor):
estimator = Estimator(LinearRegression())
elif issubclass(Estimator, RegressorMixin):
estimator = Estimator(Ridge())
else:
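                # remaining meta-estimators (classifiers, or neither classifier
                # nor regressor) get a LogisticRegression base by default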
estimator = Estimator(LogisticRegression(C=1))
elif required_parameters in (["estimators"],):
# Heterogeneous ensemble classes (i.e. stacking, voting)
if issubclass(Estimator, RegressorMixin):
estimator = Estimator(
estimators=[("est1", Ridge(alpha=0.1)), ("est2", Ridge(alpha=1))]
)
else:
estimator = Estimator(
estimators=[
("est1", LogisticRegression(C=0.1)),
("est2", LogisticRegression(C=1)),
]
)
else:
msg = (
f"Can't instantiate estimator {Estimator.__name__} "
f"parameters {required_parameters}"
)
# raise additional warning to be shown by pytest
warnings.warn(msg, SkipTestWarning)
raise SkipTest(msg)
else:
estimator = Estimator()
return estimator
def _maybe_mark_xfail(estimator, check, pytest):
# Mark (estimator, check) pairs as XFAIL if needed (see conditions in
# _should_be_skipped_or_marked())
# This is similar to _maybe_skip(), but this one is used by
# @parametrize_with_checks() instead of check_estimator()
should_be_marked, reason = _should_be_skipped_or_marked(estimator, check)
if not should_be_marked:
return estimator, check
else:
return pytest.param(estimator, check, marks=pytest.mark.xfail(reason=reason))
def _maybe_skip(estimator, check):
# Wrap a check so that it's skipped if needed (see conditions in
# _should_be_skipped_or_marked())
# This is similar to _maybe_mark_xfail(), but this one is used by
# check_estimator() instead of @parametrize_with_checks which requires
# pytest
should_be_skipped, reason = _should_be_skipped_or_marked(estimator, check)
if not should_be_skipped:
return check
check_name = check.func.__name__ if isinstance(check, partial) else check.__name__
@wraps(check)
def wrapped(*args, **kwargs):
raise SkipTest(
f"Skipping {check_name} for {estimator.__class__.__name__}: {reason}"
)
return wrapped
def _should_be_skipped_or_marked(estimator, check):
# Return whether a check should be skipped (when using check_estimator())
# or marked as XFAIL (when using @parametrize_with_checks()), along with a
# reason.
# Currently, a check should be skipped or marked if
# the check is in the _xfail_checks tag of the estimator
check_name = check.func.__name__ if isinstance(check, partial) else check.__name__
xfail_checks = _safe_tags(estimator, key="_xfail_checks") or {}
if check_name in xfail_checks:
return True, xfail_checks[check_name]
return False, "placeholder reason that will never be used"
def parametrize_with_checks(estimators):
"""Pytest specific decorator for parametrizing estimator checks.
The `id` of each check is set to be a pprint version of the estimator
and the name of the check with its keyword arguments.
    This allows using `pytest -k` to specify which tests to run::
pytest test_check_estimators.py -k check_estimators_fit_returns_self
Parameters
----------
    estimators : list of estimator instances
        Estimators to generate checks for.
.. versionchanged:: 0.24
Passing a class was deprecated in version 0.23, and support for
classes was removed in 0.24. Pass an instance instead.
.. versionadded:: 0.24
Returns
-------
decorator : `pytest.mark.parametrize`
See Also
--------
check_estimator : Check if estimator adheres to scikit-learn conventions.
Examples
--------
>>> from sklearn.utils.estimator_checks import parametrize_with_checks
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.tree import DecisionTreeRegressor
>>> @parametrize_with_checks([LogisticRegression(),
... DecisionTreeRegressor()])
... def test_sklearn_compatible_estimator(estimator, check):
... check(estimator)
"""
import pytest
if any(isinstance(est, type) for est in estimators):
msg = (
"Passing a class was deprecated in version 0.23 "
"and isn't supported anymore from 0.24."
"Please pass an instance instead."
)
raise TypeError(msg)
def checks_generator():
for estimator in estimators:
name = type(estimator).__name__
for check in _yield_all_checks(estimator):
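                # each check has signature check(name, estimator): bind the
                # estimator name here so the test only calls check(estimator)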
check = partial(check, name)
yield _maybe_mark_xfail(estimator, check, pytest)
return pytest.mark.parametrize(
"estimator, check", checks_generator(), ids=_get_check_estimator_ids
)
def check_estimator(Estimator, generate_only=False):
"""Check if estimator adheres to scikit-learn conventions.
This function will run an extensive test-suite for input validation,
shapes, etc, making sure that the estimator complies with `scikit-learn`
conventions as detailed in :ref:`rolling_your_own_estimator`.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Setting `generate_only=True` returns a generator that yields (estimator,
check) tuples where the check can be called independently from each
other, i.e. `check(estimator)`. This allows all checks to be run
independently and report the checks that are failing.
scikit-learn provides a pytest specific decorator,
:func:`~sklearn.utils.parametrize_with_checks`, making it easier to test
multiple estimators.
Parameters
----------
Estimator : estimator object
Estimator instance to check.
.. versionchanged:: 0.24
Passing a class was deprecated in version 0.23, and support for
classes was removed in 0.24.
generate_only : bool, default=False
When `False`, checks are evaluated when `check_estimator` is called.
When `True`, `check_estimator` returns a generator that yields
(estimator, check) tuples. The check is run by calling
`check(estimator)`.
.. versionadded:: 0.22
Returns
-------
checks_generator : generator
Generator that yields (estimator, check) tuples. Returned when
`generate_only=True`.
See Also
--------
parametrize_with_checks : Pytest specific decorator for parametrizing estimator
checks.
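
    Examples
    --------
    A minimal illustrative call, running the full suite on an instance:

    >>> from sklearn.utils.estimator_checks import check_estimator
    >>> from sklearn.linear_model import LogisticRegression
    >>> check_estimator(LogisticRegression())  # doctest: +SKIP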
"""
if isinstance(Estimator, type):
msg = (
"Passing a class was deprecated in version 0.23 "
"and isn't supported anymore from 0.24."
"Please pass an instance instead."
)
raise TypeError(msg)
estimator = Estimator
name = type(estimator).__name__
def checks_generator():
for check in _yield_all_checks(estimator):
check = _maybe_skip(estimator, check)
yield estimator, partial(check, name)
if generate_only:
return checks_generator()
for estimator, check in checks_generator():
try:
check(estimator)
except SkipTest as exception:
# SkipTest is thrown when pandas can't be imported, or by checks
# that are in the xfail_checks tag
warnings.warn(str(exception), SkipTestWarning)
def _regression_dataset():
global REGRESSION_DATASET
if REGRESSION_DATASET is None:
X, y = make_regression(
n_samples=200,
n_features=10,
n_informative=1,
bias=5.0,
noise=20,
random_state=42,
)
X = StandardScaler().fit_transform(X)
REGRESSION_DATASET = X, y
return REGRESSION_DATASET
def _set_checking_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
name = estimator.__class__.__name__
if "n_iter" in params and name != "TSNE":
estimator.set_params(n_iter=5)
if "max_iter" in params:
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR, LinearSVC
if estimator.__class__.__name__ in ["LinearSVR", "LinearSVC"]:
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == "NMF":
estimator.set_params(max_iter=500)
# MLP
if estimator.__class__.__name__ in ["MLPClassifier", "MLPRegressor"]:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if name == "MeanShift":
# In the case of check_fit2d_1sample, bandwidth is set to None and
# is thus estimated. De facto it is 0.0 as a single sample is provided
        # and this makes the test fail. Hence we give it a placeholder value.
estimator.set_params(bandwidth=1.0)
if name == "TruncatedSVD":
# TruncatedSVD doesn't run with n_components = n_features
# This is ugly :-/
estimator.n_components = 1
if name == "LassoLarsIC":
# Noise variance estimation does not work when `n_samples < n_features`.
# We need to provide the noise variance explicitly.
estimator.set_params(noise_variance=1.0)
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = min(estimator.n_clusters, 2)
if hasattr(estimator, "n_best"):
estimator.n_best = 1
if name == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=0.5)
if name == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
        # Due to the Johnson-Lindenstrauss lemma and the often very small
        # number of samples, the number of components of the random matrix
        # projection will probably be greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=2)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
if name in ("HistGradientBoostingClassifier", "HistGradientBoostingRegressor"):
# The default min_samples_leaf (20) isn't appropriate for small
# datasets (only very shallow trees are built) that the checks use.
estimator.set_params(min_samples_leaf=5)
if name == "DummyClassifier":
# the default strategy prior would output constant predictions and fail
# for check_classifiers_predictions
estimator.set_params(strategy="stratified")
# Speed-up by reducing the number of CV or splits for CV estimators
loo_cv = ["RidgeCV", "RidgeClassifierCV"]
if name not in loo_cv and hasattr(estimator, "cv"):
estimator.set_params(cv=3)
if hasattr(estimator, "n_splits"):
estimator.set_params(n_splits=3)
if name == "OneHotEncoder":
estimator.set_params(handle_unknown="ignore")
if name in CROSS_DECOMPOSITION:
estimator.set_params(n_components=1)
class _NotAnArray:
"""An object that is convertible to an array.
Parameters
----------
data : array-like
The data.
"""
def __init__(self, data):
self.data = np.asarray(data)
def __array__(self, dtype=None):
return self.data
def __array_function__(self, func, types, args, kwargs):
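        # Presumably allowed because validation utilities may call
        # np.may_share_memory on array-likes; any other NumPy function is
        # rejected so that estimators must convert the input explicitly first.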
if func.__name__ == "may_share_memory":
return True
raise TypeError("Don't want to call array_function {}!".format(func.__name__))
def _is_pairwise_metric(estimator):
"""Returns True if estimator accepts pairwise metric.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
        True if the estimator's `metric` parameter is set to 'precomputed',
        False otherwise.
"""
metric = getattr(estimator, "metric", None)
return bool(metric == "precomputed")
def _pairwise_estimator_convert_X(X, estimator, kernel=linear_kernel):
if _is_pairwise_metric(estimator):
return pairwise_distances(X, metric="euclidean")
if _is_pairwise(estimator):
return kernel(X, X)
return X
def _generate_sparse_matrix(X_csr):
"""Generate sparse matrices with {32,64}bit indices of diverse format.
Parameters
----------
X_csr: CSR Matrix
Input matrix in CSR format.
Returns
-------
out: iter(Matrices)
In format['dok', 'lil', 'dia', 'bsr', 'csr', 'csc', 'coo',
'coo_64', 'csc_64', 'csr_64']
"""
assert X_csr.format == "csr"
yield "csr", X_csr.copy()
for sparse_format in ["dok", "lil", "dia", "bsr", "csc", "coo"]:
yield sparse_format, X_csr.asformat(sparse_format)
    # Generate large indices matrix only if it's supported by scipy
X_coo = X_csr.asformat("coo")
X_coo.row = X_coo.row.astype("int64")
X_coo.col = X_coo.col.astype("int64")
yield "coo_64", X_coo
for sparse_format in ["csc", "csr"]:
X = X_csr.asformat(sparse_format)
X.indices = X.indices.astype("int64")
X.indptr = X.indptr.astype("int64")
yield sparse_format + "_64", X
def check_estimator_sparse_data(name, estimator_orig):
rng = np.random.RandomState(0)
X = rng.rand(40, 3)
X[X < 0.8] = 0
X = _pairwise_estimator_convert_X(X, estimator_orig)
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(int)
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
tags = _safe_tags(estimator_orig)
for matrix_format, X in _generate_sparse_matrix(X_csr):
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
estimator = clone(estimator_orig)
if name in ["Scaler", "StandardScaler"]:
estimator.set_params(with_mean=False)
# fit and predict
if "64" in matrix_format:
err_msg = (
f"Estimator {name} doesn't seem to support {matrix_format} "
"matrix, and is not failing gracefully, e.g. by using "
"check_array(X, accept_large_sparse=False)"
)
else:
err_msg = (
f"Estimator {name} doesn't seem to fail gracefully on sparse "
"data: error message should state explicitly that sparse "
"input is not supported if this is not the case."
)
with raises(
(TypeError, ValueError),
match=["sparse", "Sparse"],
may_pass=True,
err_msg=err_msg,
):
with ignore_warnings(category=FutureWarning):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
if tags["multioutput_only"]:
assert pred.shape == (X.shape[0], 1)
else:
assert pred.shape == (X.shape[0],)
if hasattr(estimator, "predict_proba"):
probs = estimator.predict_proba(X)
if tags["binary_only"]:
expected_probs_shape = (X.shape[0], 2)
else:
expected_probs_shape = (X.shape[0], 4)
assert probs.shape == expected_probs_shape
@ignore_warnings(category=FutureWarning)
def check_sample_weights_pandas_series(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = clone(estimator_orig)
try:
import pandas as pd
X = np.array(
[
[1, 1],
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[3, 1],
[3, 2],
[3, 3],
[3, 4],
]
)
X = pd.DataFrame(_pairwise_estimator_convert_X(X, estimator_orig))
y = pd.Series([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])
weights = pd.Series([1] * 12)
if _safe_tags(estimator, key="multioutput_only"):
y = pd.DataFrame(y)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError(
"Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name)
)
except ImportError:
raise SkipTest(
"pandas is not installed: not testing for "
"input of type pandas.Series to class weight."
)
@ignore_warnings(category=(FutureWarning))
def check_sample_weights_not_an_array(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type _NotAnArray in the 'fit' function.
estimator = clone(estimator_orig)
X = np.array(
[
[1, 1],
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[3, 1],
[3, 2],
[3, 3],
[3, 4],
]
)
X = _NotAnArray(_pairwise_estimator_convert_X(X, estimator_orig))
y = _NotAnArray([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])
weights = _NotAnArray([1] * 12)
if _safe_tags(estimator, key="multioutput_only"):
y = _NotAnArray(y.data.reshape(-1, 1))
estimator.fit(X, y, sample_weight=weights)
@ignore_warnings(category=(FutureWarning))
def check_sample_weights_list(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type list in the 'fit' function.
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
n_samples = 30
X = _pairwise_estimator_convert_X(rnd.uniform(size=(n_samples, 3)), estimator_orig)
y = np.arange(n_samples) % 3
y = _enforce_estimator_tags_y(estimator, y)
sample_weight = [3] * n_samples
# Test that estimators don't raise any exception
estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=FutureWarning)
def check_sample_weights_shape(name, estimator_orig):
# check that estimators raise an error if sample_weight
# shape mismatches the input
estimator = clone(estimator_orig)
X = np.array(
[
[1, 3],
[1, 3],
[1, 3],
[1, 3],
[2, 1],
[2, 1],
[2, 1],
[2, 1],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[4, 1],
[4, 1],
[4, 1],
[4, 1],
]
)
y = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2])
y = _enforce_estimator_tags_y(estimator, y)
estimator.fit(X, y, sample_weight=np.ones(len(y)))
with raises(ValueError):
estimator.fit(X, y, sample_weight=np.ones(2 * len(y)))
with raises(ValueError):
estimator.fit(X, y, sample_weight=np.ones((len(y), 2)))
@ignore_warnings(category=FutureWarning)
def check_sample_weights_invariance(name, estimator_orig, kind="ones"):
# For kind="ones" check that the estimators yield same results for
# unit weights and no weights
# For kind="zeros" check that setting sample_weight to 0 is equivalent
# to removing corresponding samples.
estimator1 = clone(estimator_orig)
estimator2 = clone(estimator_orig)
set_random_state(estimator1, random_state=0)
set_random_state(estimator2, random_state=0)
X1 = np.array(
[
[1, 3],
[1, 3],
[1, 3],
[1, 3],
[2, 1],
[2, 1],
[2, 1],
[2, 1],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[4, 1],
[4, 1],
[4, 1],
[4, 1],
],
dtype=np.float64,
)
y1 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int)
if kind == "ones":
X2 = X1
y2 = y1
sw2 = np.ones(shape=len(y1))
err_msg = (
f"For {name} sample_weight=None is not equivalent to sample_weight=ones"
)
elif kind == "zeros":
# Construct a dataset that is very different to (X, y) if weights
# are disregarded, but identical to (X, y) given weights.
X2 = np.vstack([X1, X1 + 1])
y2 = np.hstack([y1, 3 - y1])
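        # 3 - y1 maps label 1 -> 2 and 2 -> 1, so the appended (zero-weight)
        # half would flip the labels if the weights were ignored.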
sw2 = np.ones(shape=len(y1) * 2)
sw2[len(y1) :] = 0
X2, y2, sw2 = shuffle(X2, y2, sw2, random_state=0)
err_msg = (
f"For {name}, a zero sample_weight is not equivalent to removing the sample"
)
else: # pragma: no cover
raise ValueError
y1 = _enforce_estimator_tags_y(estimator1, y1)
y2 = _enforce_estimator_tags_y(estimator2, y2)
estimator1.fit(X1, y=y1, sample_weight=None)
estimator2.fit(X2, y=y2, sample_weight=sw2)
for method in ["predict", "predict_proba", "decision_function", "transform"]:
if hasattr(estimator_orig, method):
X_pred1 = getattr(estimator1, method)(X1)
X_pred2 = getattr(estimator2, method)(X1)
assert_allclose_dense_sparse(X_pred1, X_pred2, err_msg=err_msg)
def check_sample_weights_not_overwritten(name, estimator_orig):
# check that estimators don't override the passed sample_weight parameter
estimator = clone(estimator_orig)
set_random_state(estimator, random_state=0)
X = np.array(
[
[1, 3],
[1, 3],
[1, 3],
[1, 3],
[2, 1],
[2, 1],
[2, 1],
[2, 1],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[4, 1],
[4, 1],
[4, 1],
[4, 1],
],
dtype=np.float64,
)
y = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int)
y = _enforce_estimator_tags_y(estimator, y)
sample_weight_original = np.ones(y.shape[0])
sample_weight_original[0] = 10.0
sample_weight_fit = sample_weight_original.copy()
estimator.fit(X, y, sample_weight=sample_weight_fit)
err_msg = "{name} overwrote the original `sample_weight` given during fit"
assert_allclose(sample_weight_fit, sample_weight_original, err_msg=err_msg)
@ignore_warnings(category=(FutureWarning, UserWarning))
def check_dtype_object(name, estimator_orig):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = _pairwise_estimator_convert_X(rng.rand(40, 10), estimator_orig)
X = X.astype(object)
tags = _safe_tags(estimator_orig)
y = (X[:, 0] * 4).astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
with raises(Exception, match="Unknown label type", may_pass=True):
estimator.fit(X, y.astype(object))
if "string" not in tags["X_types"]:
X[0, 0] = {"foo": "bar"}
msg = "argument must be a string.* number"
with raises(TypeError, match=msg):
estimator.fit(X, y)
else:
# Estimators supporting string will not call np.asarray to convert the
# data to numeric and therefore, the error will not be raised.
# Checking for each element dtype in the input array will be costly.
# Refer to #11401 for full discussion.
estimator.fit(X, y)
def check_complex_data(name, estimator_orig):
rng = np.random.RandomState(42)
# check that estimators raise an exception on providing complex data
X = rng.uniform(size=10) + 1j * rng.uniform(size=10)
X = X.reshape(-1, 1)
# Something both valid for classification and regression
y = rng.randint(low=0, high=2, size=10) + 1j
estimator = clone(estimator_orig)
set_random_state(estimator, random_state=0)
with raises(ValueError, match="Complex data not supported"):
estimator.fit(X, y)
@ignore_warnings
def check_dict_unchanged(name, estimator_orig):
    # this estimator raises a
    # "ValueError: Found array with 0 feature(s) (shape=(23, 0)) while a
    # minimum of 1 is required." error
if name in ["SpectralCoclustering"]:
return
rnd = np.random.RandomState(0)
if name in ["RANSACRegressor"]:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function", "predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert estimator.__dict__ == dict_before, (
"Estimator changes __dict__ during %s" % method
)
def _is_public_parameter(attr):
return not (attr.startswith("_") or attr.endswith("_"))
@ignore_warnings(category=FutureWarning)
def check_dont_overwrite_parameters(name, estimator_orig):
# check that fit method only changes or sets private attributes
if hasattr(estimator_orig.__init__, "deprecated_original"):
# to not check deprecated classes
return
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [
key for key in dict_after_fit.keys() if _is_public_parameter(key)
]
attrs_added_by_fit = [
key for key in public_keys_after_fit if key not in dict_before_fit.keys()
]
# check that fit doesn't add any public attribute
assert not attrs_added_by_fit, (
"Estimator adds public attribute(s) during"
" the fit method."
" Estimators are only allowed to add private attributes"
" either started with _ or ended"
" with _ but %s added"
% ", ".join(attrs_added_by_fit)
)
# check that fit doesn't change any public attribute
attrs_changed_by_fit = [
key
for key in public_keys_after_fit
if (dict_before_fit[key] is not dict_after_fit[key])
]
assert not attrs_changed_by_fit, (
"Estimator changes public attribute(s) during"
" the fit method. Estimators are only allowed"
" to change attributes started"
" or ended with _, but"
" %s changed"
% ", ".join(attrs_changed_by_fit)
)
@ignore_warnings(category=FutureWarning)
def check_fit2d_predict1d(name, estimator_orig):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function", "predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(
ValueError, "Reshape your data", getattr(estimator, method), X[0]
)
def _apply_on_subsets(func, X):
# apply function on the whole set and on mini batches
result_full = func(X)
n_features = X.shape[1]
result_by_batch = [func(batch.reshape(1, n_features)) for batch in X]
# func can output tuple (e.g. score_samples)
if type(result_full) == tuple:
result_full = result_full[0]
result_by_batch = list(map(lambda x: x[0], result_by_batch))
if sparse.issparse(result_full):
result_full = result_full.A
result_by_batch = [x.A for x in result_by_batch]
return np.ravel(result_full), np.ravel(result_by_batch)
@ignore_warnings(category=FutureWarning)
def check_methods_subset_invariance(name, estimator_orig):
# check that method gives invariant results if applied
# on mini batches or the whole set
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in [
"predict",
"transform",
"decision_function",
"score_samples",
"predict_proba",
]:
msg = ("{method} of {name} is not invariant when applied to a subset.").format(
method=method, name=name
)
if hasattr(estimator, method):
result_full, result_by_batch = _apply_on_subsets(
getattr(estimator, method), X
)
assert_allclose(result_full, result_by_batch, atol=1e-7, err_msg=msg)
@ignore_warnings(category=FutureWarning)
def check_methods_sample_order_invariance(name, estimator_orig):
# check that method gives invariant results if applied
# on a subset with different sample order
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(np.int64)
if _safe_tags(estimator_orig, key="binary_only"):
y[y == 2] = 1
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 2
set_random_state(estimator, 1)
estimator.fit(X, y)
idx = np.random.permutation(X.shape[0])
for method in [
"predict",
"transform",
"decision_function",
"score_samples",
"predict_proba",
]:
msg = (
"{method} of {name} is not invariant when applied to a dataset"
"with different sample order."
).format(method=method, name=name)
if hasattr(estimator, method):
assert_allclose_dense_sparse(
getattr(estimator, method)(X)[idx],
getattr(estimator, method)(X[idx]),
atol=1e-9,
err_msg=msg,
)
@ignore_warnings
def check_fit2d_1sample(name, estimator_orig):
# Check that fitting a 2d array with only one sample either works or
    # raises an informative error. The error message should either mention
# the number of samples or the number of classes.
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
# min_cluster_size cannot be less than the data size for OPTICS.
if name == "OPTICS":
estimator.set_params(min_samples=1)
msgs = [
"1 sample",
"n_samples = 1",
"n_samples=1",
"one sample",
"1 class",
"one class",
]
with raises(ValueError, match=msgs, may_pass=True):
estimator.fit(X, y)
@ignore_warnings
def check_fit2d_1feature(name, estimator_orig):
    # check that fitting a 2d array with only 1 feature either works or raises
    # an informative error message
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
# ensure two labels in subsample for RandomizedLogisticRegression
if name == "RandomizedLogisticRegression":
estimator.sample_fraction = 1
# ensure non skipped trials for RANSACRegressor
if name == "RANSACRegressor":
estimator.residual_threshold = 0.5
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator, 1)
msgs = [r"1 feature\(s\)", "n_features = 1", "n_features=1"]
with raises(ValueError, match=msgs, may_pass=True):
estimator.fit(X, y)
@ignore_warnings
def check_fit1d(name, estimator_orig):
# check fitting 1d X array raises a ValueError
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
with raises(ValueError):
estimator.fit(X, y)
@ignore_warnings(category=FutureWarning)
def check_transformer_general(name, transformer, readonly_memmap=False):
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X = StandardScaler().fit_transform(X)
X -= X.min()
X = _pairwise_estimator_convert_X(X, transformer)
if readonly_memmap:
X, y = create_memmap_backed_data([X, y])
_check_transformer(name, transformer, X, y)
@ignore_warnings(category=FutureWarning)
def check_transformer_data_not_an_array(name, transformer):
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - 0.1
X = _pairwise_estimator_convert_X(X, transformer)
this_X = _NotAnArray(X)
this_y = _NotAnArray(np.asarray(y))
_check_transformer(name, transformer, this_X, this_y)
# try the same with some list
_check_transformer(name, transformer, X.tolist(), y.tolist())
@ignore_warnings(category=FutureWarning)
def check_transformers_unfitted(name, transformer):
X, y = _regression_dataset()
transformer = clone(transformer)
with raises(
(AttributeError, ValueError),
err_msg=(
"The unfitted "
f"transformer {name} does not raise an error when "
"transform is called. Perhaps use "
"check_is_fitted in transform."
),
):
transformer.transform(X)
def _check_transformer(name, transformer_orig, X, y):
n_samples, n_features = np.asarray(X).shape
transformer = clone(transformer_orig)
set_random_state(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[np.asarray(y), np.asarray(y)]
y_[::2, 1] *= 2
if isinstance(X, _NotAnArray):
y_ = _NotAnArray(y_)
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert x_pred.shape[0] == n_samples
else:
# check for consistent n_samples
assert X_pred.shape[0] == n_samples
if hasattr(transformer, "transform"):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if _safe_tags(transformer_orig, key="non_deterministic"):
msg = name + " is non deterministic"
raise SkipTest(msg)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_allclose_dense_sparse(
x_pred,
x_pred2,
atol=1e-2,
err_msg="fit_transform and transform outcomes not consistent in %s"
% transformer,
)
assert_allclose_dense_sparse(
x_pred,
x_pred3,
atol=1e-2,
err_msg="consecutive fit_transform outcomes not consistent in %s"
% transformer,
)
else:
assert_allclose_dense_sparse(
X_pred,
X_pred2,
err_msg="fit_transform and transform outcomes not consistent in %s"
% transformer,
atol=1e-2,
)
assert_allclose_dense_sparse(
X_pred,
X_pred3,
atol=1e-2,
err_msg="consecutive fit_transform outcomes not consistent in %s"
% transformer,
)
assert _num_samples(X_pred2) == n_samples
assert _num_samples(X_pred3) == n_samples
# raises error on malformed input for transform
if (
hasattr(X, "shape")
and not _safe_tags(transformer, key="stateless")
and X.ndim == 2
and X.shape[1] > 1
):
# If it's not an array, it does not have a 'T' property
with raises(
ValueError,
err_msg=(
f"The transformer {name} does not raise an error "
"when the number of features in transform is different from "
"the number of features in fit."
),
):
transformer.transform(X[:, :-1])
@ignore_warnings
def check_pipeline_consistency(name, estimator_orig):
if _safe_tags(estimator_orig, key="non_deterministic"):
msg = name + " is non deterministic"
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X -= X.min()
X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_allclose_dense_sparse(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, estimator_orig):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
n_samples = 30
X = rnd.uniform(size=(n_samples, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = np.arange(n_samples) % 3
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
if args[0] == "self":
# if_delegate_has_method makes methods into functions
# with an explicit "self", so need to shift arguments
args = args[1:]
assert args[1] in ["y", "Y"], (
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, type(estimator).__name__, args)
)
@ignore_warnings
def check_estimators_dtypes(name, estimator_orig):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_32 = _pairwise_estimator_convert_X(X_train_32, estimator_orig)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = _enforce_estimator_tags_y(estimator_orig, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_transformer_preserve_dtypes(name, transformer_orig):
# check that dtype are preserved meaning if input X is of some dtype
# X_transformed should be from the same dtype.
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
cluster_std=0.1,
)
X = StandardScaler().fit_transform(X)
X -= X.min()
X = _pairwise_estimator_convert_X(X, transformer_orig)
for dtype in _safe_tags(transformer_orig, key="preserves_dtype"):
X_cast = X.astype(dtype)
transformer = clone(transformer_orig)
set_random_state(transformer)
X_trans = transformer.fit_transform(X_cast, y)
if isinstance(X_trans, tuple):
            # cross-decomposition returns a tuple of (x_scores, y_scores)
# when given y with fit_transform; only check the first element
X_trans = X_trans[0]
# check that the output dtype is preserved
assert X_trans.dtype == dtype, (
f"Estimator transform dtype: {X_trans.dtype} - "
f"original/expected dtype: {dtype.__name__}"
)
@ignore_warnings(category=FutureWarning)
def check_estimators_empty_data_messages(name, estimator_orig):
e = clone(estimator_orig)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
err_msg = (
f"The estimator {name} does not raise a ValueError when an "
"empty data is used to train. Perhaps use check_array in train."
)
with raises(ValueError, err_msg=err_msg):
e.fit(X_zero_samples, [])
X_zero_features = np.empty(0).reshape(12, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = _enforce_estimator_tags_y(e, np.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]))
msg = r"0 feature\(s\) \(shape=\(\d*, 0\)\) while a minimum of \d* " "is required."
with raises(ValueError, match=msg):
e.fit(X_zero_features, y)
@ignore_warnings(category=FutureWarning)
def check_estimators_nan_inf(name, estimator_orig):
    # Checks that the estimator raises an error when X contains NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = _pairwise_estimator_convert_X(
rnd.uniform(size=(10, 3)), estimator_orig
)
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = _enforce_estimator_tags_y(estimator_orig, y)
error_string_fit = f"Estimator {name} doesn't check for NaN and inf in fit."
error_string_predict = f"Estimator {name} doesn't check for NaN and inf in predict."
error_string_transform = (
f"Estimator {name} doesn't check for NaN and inf in transform."
)
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
# try to fit
with raises(ValueError, match=["inf", "NaN"], err_msg=error_string_fit):
estimator.fit(X_train, y)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
with raises(
ValueError,
match=["inf", "NaN"],
err_msg=error_string_predict,
):
estimator.predict(X_train)
# transform
if hasattr(estimator, "transform"):
with raises(
ValueError,
match=["inf", "NaN"],
err_msg=error_string_transform,
):
estimator.transform(X_train)
@ignore_warnings
def check_nonsquare_error(name, estimator_orig):
"""Test that error is thrown when non-square data provided."""
X, y = make_blobs(n_samples=20, n_features=10)
estimator = clone(estimator_orig)
with raises(
ValueError,
err_msg=(
f"The pairwise estimator {name} does not raise an error on non-square data"
),
):
estimator.fit(X, y)
@ignore_warnings
def check_estimators_pickle(name, estimator_orig):
"""Test that we can pickle all estimators."""
check_methods = ["predict", "transform", "decision_function", "predict_proba"]
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
    # some estimators can't handle feature values less than 0
X -= X.min()
X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
tags = _safe_tags(estimator_orig)
# include NaN values when the estimator should deal with them
if tags["allow_nan"]:
# set randomly 10 elements to np.nan
rng = np.random.RandomState(42)
mask = rng.choice(X.size, 10, replace=False)
X.reshape(-1)[mask] = np.nan
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
estimator.fit(X, y)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
module_name = estimator.__module__
if module_name.startswith("sklearn.") and not (
"test_" in module_name or module_name.endswith("_testing")
):
# strict check for sklearn estimators that are not implemented in test
# modules.
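        # BaseEstimator.__getstate__ stores the sklearn version in the pickle
        # (as "_sklearn_version"), hence the b"version" substring check.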
assert b"version" in pickled_estimator
unpickled_estimator = pickle.loads(pickled_estimator)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_allclose_dense_sparse(result[method], unpickled_result)
@ignore_warnings(category=FutureWarning)
def check_estimators_partial_fit_n_features(name, estimator_orig):
# check if number of features changes between calls to partial_fit.
if not hasattr(estimator_orig, "partial_fit"):
return
estimator = clone(estimator_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
y = _enforce_estimator_tags_y(estimator_orig, y)
try:
if is_classifier(estimator):
classes = np.unique(y)
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
except NotImplementedError:
return
with raises(
ValueError,
err_msg=(
f"The estimator {name} does not raise an error when the "
"number of features changes between calls to partial_fit."
),
):
estimator.partial_fit(X[:, :-1], y)
@ignore_warnings(category=FutureWarning)
def check_classifier_multioutput(name, estimator):
n_samples, n_labels, n_classes = 42, 5, 3
tags = _safe_tags(estimator)
estimator = clone(estimator)
X, y = make_multilabel_classification(
random_state=42, n_samples=n_samples, n_labels=n_labels, n_classes=n_classes
)
estimator.fit(X, y)
y_pred = estimator.predict(X)
assert y_pred.shape == (n_samples, n_classes), (
"The shape of the prediction for multioutput data is "
"incorrect. Expected {}, got {}.".format((n_samples, n_labels), y_pred.shape)
)
assert y_pred.dtype.kind == "i"
if hasattr(estimator, "decision_function"):
decision = estimator.decision_function(X)
assert isinstance(decision, np.ndarray)
assert decision.shape == (n_samples, n_classes), (
"The shape of the decision function output for "
"multioutput data is incorrect. Expected {}, got {}.".format(
(n_samples, n_classes), decision.shape
)
)
dec_pred = (decision > 0).astype(int)
dec_exp = estimator.classes_[dec_pred]
assert_array_equal(dec_exp, y_pred)
if hasattr(estimator, "predict_proba"):
y_prob = estimator.predict_proba(X)
if isinstance(y_prob, list) and not tags["poor_score"]:
for i in range(n_classes):
assert y_prob[i].shape == (n_samples, 2), (
"The shape of the probability for multioutput data is"
" incorrect. Expected {}, got {}.".format(
(n_samples, 2), y_prob[i].shape
)
)
assert_array_equal(
np.argmax(y_prob[i], axis=1).astype(int), y_pred[:, i]
)
elif not tags["poor_score"]:
assert y_prob.shape == (n_samples, n_classes), (
"The shape of the probability for multioutput data is"
" incorrect. Expected {}, got {}.".format(
(n_samples, n_classes), y_prob.shape
)
)
assert_array_equal(y_prob.round().astype(int), y_pred)
if hasattr(estimator, "decision_function") and hasattr(estimator, "predict_proba"):
for i in range(n_classes):
y_proba = estimator.predict_proba(X)[:, i]
y_decision = estimator.decision_function(X)
assert_array_equal(rankdata(y_proba), rankdata(y_decision[:, i]))
@ignore_warnings(category=FutureWarning)
def check_regressor_multioutput(name, estimator):
estimator = clone(estimator)
n_samples = n_features = 10
if not _is_pairwise_metric(estimator):
n_samples = n_samples + 1
X, y = make_regression(
random_state=42, n_targets=5, n_samples=n_samples, n_features=n_features
)
X = _pairwise_estimator_convert_X(X, estimator)
estimator.fit(X, y)
y_pred = estimator.predict(X)
assert y_pred.dtype == np.dtype("float64"), (
"Multioutput predictions by a regressor are expected to be"
" floating-point precision. Got {} instead".format(y_pred.dtype)
)
assert y_pred.shape == y.shape, (
"The shape of the prediction for multioutput data is incorrect."
" Expected {}, got {}."
)
@ignore_warnings(category=FutureWarning)
def check_clustering(name, clusterer_orig, readonly_memmap=False):
clusterer = clone(clusterer_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
rng = np.random.RandomState(7)
X_noise = np.concatenate([X, rng.uniform(low=-3, high=3, size=(5, 2))])
if readonly_memmap:
X, y, X_noise = create_memmap_backed_data([X, y, X_noise])
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
if hasattr(clusterer, "n_clusters"):
clusterer.set_params(n_clusters=3)
set_random_state(clusterer)
if name == "AffinityPropagation":
clusterer.set_params(preference=-100)
clusterer.set_params(max_iter=100)
# fit
clusterer.fit(X)
# with lists
clusterer.fit(X.tolist())
pred = clusterer.labels_
assert pred.shape == (n_samples,)
assert adjusted_rand_score(pred, y) > 0.4
if _safe_tags(clusterer, key="non_deterministic"):
return
set_random_state(clusterer)
with warnings.catch_warnings(record=True):
pred2 = clusterer.fit_predict(X)
assert_array_equal(pred, pred2)
# fit_predict(X) and labels_ should be of type int
assert pred.dtype in [np.dtype("int32"), np.dtype("int64")]
assert pred2.dtype in [np.dtype("int32"), np.dtype("int64")]
# Add noise to X to test the possible values of the labels
labels = clusterer.fit_predict(X_noise)
# There should be at least one sample in every cluster. Equivalently
# labels_ should contain all the consecutive values between its
# min and its max.
labels_sorted = np.unique(labels)
assert_array_equal(
labels_sorted, np.arange(labels_sorted[0], labels_sorted[-1] + 1)
)
# Labels are expected to start at 0 (no noise) or -1 (if noise)
assert labels_sorted[0] in [0, -1]
    # Labels should not be greater than n_clusters - 1
if hasattr(clusterer, "n_clusters"):
n_clusters = getattr(clusterer, "n_clusters")
assert n_clusters - 1 >= labels_sorted[-1]
# else labels should be less than max(labels_) which is necessarily true
@ignore_warnings(category=FutureWarning)
def check_clusterer_compute_labels_predict(name, clusterer_orig):
"""Check that predict is invariant of compute_labels."""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = clone(clusterer_orig)
set_random_state(clusterer)
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
@ignore_warnings(category=FutureWarning)
def check_classifiers_one_label(name, classifier_orig):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = "Classifier can't predict when only one class is present."
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
classifier = clone(classifier_orig)
with raises(
ValueError, match="class", may_pass=True, err_msg=error_string_fit
) as cm:
classifier.fit(X_train, y)
if cm.raised_and_matched:
# ValueError was raised with proper error message
return
assert_array_equal(classifier.predict(X_test), y, err_msg=error_string_predict)
def _create_memmap_backed_data(numpy_arrays):
# OpenBLAS is known to segfault with unaligned data on the Prescott architecture
# See: https://github.com/scipy/scipy/issues/14886
has_prescott_openblas = any(
True
for info in threadpool_info()
if info["internal_api"] == "openblas"
# Prudently assume Prescott might be the architecture if it is unknown.
and info.get("architecture", "prescott").lower() == "prescott"
)
return [
create_memmap_backed_data(array, aligned=has_prescott_openblas)
for array in numpy_arrays
]
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(
name, classifier_orig, readonly_memmap=False, X_dtype="float64"
):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m = X_m.astype(X_dtype)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
if name in ["BernoulliNB", "MultinomialNB", "ComplementNB", "CategoricalNB"]:
X_m -= X_m.min()
X_b -= X_b.min()
if readonly_memmap:
X_m, y_m, X_b, y_b = _create_memmap_backed_data([X_m, y_m, X_b, y_b])
problems = [(X_b, y_b)]
tags = _safe_tags(classifier_orig)
if not tags["binary_only"]:
problems.append((X_m, y_m))
for (X, y) in problems:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = clone(classifier_orig)
X = _pairwise_estimator_convert_X(X, classifier)
y = _enforce_estimator_tags_y(classifier, y)
set_random_state(classifier)
# raises error on malformed input for fit
if not tags["no_validation"]:
with raises(
ValueError,
err_msg=(
f"The classifier {name} does not raise an error when "
"incorrect/malformed input data for fit is passed. The number "
"of training examples is not the same as the number of "
"labels. Perhaps use check_X_y in fit."
),
):
classifier.fit(X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert hasattr(classifier, "classes_")
y_pred = classifier.predict(X)
assert y_pred.shape == (n_samples,)
# training set performance
if not tags["poor_score"]:
assert accuracy_score(y, y_pred) > 0.83
# raises error on malformed input for predict
msg_pairwise = (
"The classifier {} does not raise an error when shape of X in "
" {} is not equal to (n_test_samples, n_training_samples)"
)
msg = (
"The classifier {} does not raise an error when the number of "
"features in {} is different from the number of features in "
"fit."
)
if not tags["no_validation"]:
if _is_pairwise(classifier):
with raises(
ValueError,
err_msg=msg_pairwise.format(name, "predict"),
):
classifier.predict(X.reshape(-1, 1))
else:
with raises(ValueError, err_msg=msg.format(name, "predict")):
classifier.predict(X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
if not tags["multioutput_only"]:
assert decision.shape == (n_samples,)
else:
assert decision.shape == (n_samples, 1)
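                    # a positive decision value corresponds to the predicted
                    # positive class in the binary case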
dec_pred = (decision.ravel() > 0).astype(int)
assert_array_equal(dec_pred, y_pred)
else:
assert decision.shape == (n_samples, n_classes)
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input for decision_function
if not tags["no_validation"]:
if _is_pairwise(classifier):
with raises(
ValueError,
err_msg=msg_pairwise.format(name, "decision_function"),
):
classifier.decision_function(X.reshape(-1, 1))
else:
with raises(
ValueError,
err_msg=msg.format(name, "decision_function"),
):
classifier.decision_function(X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert y_prob.shape == (n_samples, n_classes)
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1), np.ones(n_samples))
if not tags["no_validation"]:
# raises error on malformed input for predict_proba
if _is_pairwise(classifier_orig):
with raises(
ValueError,
err_msg=msg_pairwise.format(name, "predict_proba"),
):
classifier.predict_proba(X.reshape(-1, 1))
else:
with raises(
ValueError,
err_msg=msg.format(name, "predict_proba"),
):
classifier.predict_proba(X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
def check_outlier_corruption(num_outliers, expected_outliers, decision):
# Check for deviation from the precise given contamination level that may
# be due to ties in the anomaly scores.
if num_outliers < expected_outliers:
start = num_outliers
end = expected_outliers + 1
else:
start = expected_outliers
end = num_outliers + 1
# ensure that all values in the 'critical area' are tied,
# leading to the observed discrepancy between provided
# and actual contamination levels.
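    # e.g. if expected_outliers=30 but num_outliers=25, the sorted decision
    # values at positions 25..30 must all be identical (i.e. tied).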
sorted_decision = np.sort(decision)
msg = (
"The number of predicted outliers is not equal to the expected "
"number of outliers and this difference is not explained by the "
"number of ties in the decision_function values"
)
assert len(np.unique(sorted_decision[start:end])) == 1, msg
def check_outliers_train(name, estimator_orig, readonly_memmap=True):
n_samples = 300
X, _ = make_blobs(n_samples=n_samples, random_state=0)
X = shuffle(X, random_state=7)
if readonly_memmap:
X = create_memmap_backed_data(X)
n_samples, n_features = X.shape
estimator = clone(estimator_orig)
set_random_state(estimator)
# fit
estimator.fit(X)
# with lists
estimator.fit(X.tolist())
y_pred = estimator.predict(X)
assert y_pred.shape == (n_samples,)
assert y_pred.dtype.kind == "i"
assert_array_equal(np.unique(y_pred), np.array([-1, 1]))
decision = estimator.decision_function(X)
scores = estimator.score_samples(X)
for output in [decision, scores]:
assert output.dtype == np.dtype("float")
assert output.shape == (n_samples,)
# raises error on malformed input for predict
with raises(ValueError):
estimator.predict(X.T)
# decision_function agrees with predict
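    # (outlier detectors predict +1 for inliers and -1 for outliers, so a
    # non-negative decision value maps to an inlier)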
dec_pred = (decision >= 0).astype(int)
dec_pred[dec_pred == 0] = -1
assert_array_equal(dec_pred, y_pred)
# raises error on malformed input for decision_function
with raises(ValueError):
estimator.decision_function(X.T)
# decision_function is a translation of score_samples
y_dec = scores - estimator.offset_
assert_allclose(y_dec, decision)
# raises error on malformed input for score_samples
with raises(ValueError):
estimator.score_samples(X.T)
# contamination parameter (not for OneClassSVM which has the nu parameter)
if hasattr(estimator, "contamination") and not hasattr(estimator, "novelty"):
# proportion of outliers equal to contamination parameter when not
        # set to 'auto'. This is true for the training set and thus cannot be
# checked as follows for estimators with a novelty parameter such as
# LocalOutlierFactor (tested in check_outliers_fit_predict)
expected_outliers = 30
contamination = expected_outliers / n_samples
estimator.set_params(contamination=contamination)
estimator.fit(X)
y_pred = estimator.predict(X)
num_outliers = np.sum(y_pred != 1)
# num_outliers should be equal to expected_outliers unless
# there are ties in the decision_function values. this can
# only be tested for estimators with a decision_function
# method, i.e. all estimators except LOF which is already
# excluded from this if branch.
if num_outliers != expected_outliers:
decision = estimator.decision_function(X)
check_outlier_corruption(num_outliers, expected_outliers, decision)
# raises error when contamination is a scalar and not in [0,1]
msg = r"contamination must be in \(0, 0.5]"
for contamination in [-0.5, 2.3]:
estimator.set_params(contamination=contamination)
with raises(ValueError, match=msg):
estimator.fit(X)
@ignore_warnings(category=FutureWarning)
def check_classifiers_multilabel_representation_invariance(name, classifier_orig):
X, y = make_multilabel_classification(
n_samples=100,
n_features=2,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, y_train = X[:80], y[:80]
X_test = X[80:]
y_train_list_of_lists = y_train.tolist()
y_train_list_of_arrays = list(y_train)
classifier = clone(classifier_orig)
set_random_state(classifier)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
y_pred_list_of_lists = classifier.fit(X_train, y_train_list_of_lists).predict(
X_test
)
y_pred_list_of_arrays = classifier.fit(X_train, y_train_list_of_arrays).predict(
X_test
)
assert_array_equal(y_pred, y_pred_list_of_arrays)
assert_array_equal(y_pred, y_pred_list_of_lists)
assert y_pred.dtype == y_pred_list_of_arrays.dtype
assert y_pred.dtype == y_pred_list_of_lists.dtype
assert type(y_pred) == type(y_pred_list_of_arrays)
assert type(y_pred) == type(y_pred_list_of_lists)
@ignore_warnings(category=FutureWarning)
def check_classifiers_multilabel_output_format_predict(name, classifier_orig):
"""Check the output of the `predict` method for classifiers supporting
multilabel-indicator targets."""
classifier = clone(classifier_orig)
set_random_state(classifier)
n_samples, test_size, n_outputs = 100, 25, 5
X, y = make_multilabel_classification(
n_samples=n_samples,
n_features=2,
n_classes=n_outputs,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, X_test = X[:-test_size], X[-test_size:]
y_train, y_test = y[:-test_size], y[-test_size:]
classifier.fit(X_train, y_train)
response_method_name = "predict"
predict_method = getattr(classifier, response_method_name, None)
if predict_method is None:
raise SkipTest(f"{name} does not have a {response_method_name} method.")
y_pred = predict_method(X_test)
# y_pred.shape -> y_test.shape with the same dtype
assert isinstance(y_pred, np.ndarray), (
f"{name}.predict is expected to output a NumPy array. Got "
f"{type(y_pred)} instead."
)
assert y_pred.shape == y_test.shape, (
f"{name}.predict outputs a NumPy array of shape {y_pred.shape} "
f"instead of {y_test.shape}."
)
assert y_pred.dtype == y_test.dtype, (
f"{name}.predict does not output the same dtype than the targets. "
f"Got {y_pred.dtype} instead of {y_test.dtype}."
)
@ignore_warnings(category=FutureWarning)
def check_classifiers_multilabel_output_format_predict_proba(name, classifier_orig):
"""Check the output of the `predict_proba` method for classifiers supporting
multilabel-indicator targets."""
classifier = clone(classifier_orig)
set_random_state(classifier)
n_samples, test_size, n_outputs = 100, 25, 5
X, y = make_multilabel_classification(
n_samples=n_samples,
n_features=2,
n_classes=n_outputs,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, X_test = X[:-test_size], X[-test_size:]
y_train = y[:-test_size]
classifier.fit(X_train, y_train)
response_method_name = "predict_proba"
predict_proba_method = getattr(classifier, response_method_name, None)
if predict_proba_method is None:
raise SkipTest(f"{name} does not have a {response_method_name} method.")
y_pred = predict_proba_method(X_test)
# y_pred.shape -> 2 possibilities:
# - list of length n_outputs of shape (n_samples, 2);
# - ndarray of shape (n_samples, n_outputs).
# dtype should be floating
if isinstance(y_pred, list):
assert len(y_pred) == n_outputs, (
f"When {name}.predict_proba returns a list, the list should "
"be of length n_outputs and contain NumPy arrays. Got length "
f"of {len(y_pred)} instead of {n_outputs}."
)
for pred in y_pred:
assert pred.shape == (test_size, 2), (
f"When {name}.predict_proba returns a list, this list "
"should contain NumPy arrays of shape (n_samples, 2). Got "
f"NumPy arrays of shape {pred.shape} instead of "
f"{(test_size, 2)}."
)
assert pred.dtype.kind == "f", (
f"When {name}.predict_proba returns a list, it should "
"contain NumPy arrays with floating dtype. Got "
f"{pred.dtype} instead."
)
# check that we have the correct probabilities
err_msg = (
f"When {name}.predict_proba returns a list, each NumPy "
"array should contain probabilities for each class and "
"thus each row should sum to 1 (or close to 1 due to "
"numerical errors)."
)
assert_allclose(pred.sum(axis=1), 1, err_msg=err_msg)
elif isinstance(y_pred, np.ndarray):
assert y_pred.shape == (test_size, n_outputs), (
f"When {name}.predict_proba returns a NumPy array, the "
f"expected shape is (n_samples, n_outputs). Got {y_pred.shape}"
f" instead of {(test_size, n_outputs)}."
)
assert y_pred.dtype.kind == "f", (
f"When {name}.predict_proba returns a NumPy array, the "
f"expected data type is floating. Got {y_pred.dtype} instead."
)
err_msg = (
f"When {name}.predict_proba returns a NumPy array, this array "
"is expected to provide probabilities of the positive class "
"and should therefore contain values between 0 and 1."
)
assert_array_less(0, y_pred, err_msg=err_msg)
assert_array_less(y_pred, 1, err_msg=err_msg)
else:
raise ValueError(
f"Unknown returned type {type(y_pred)} by {name}."
"predict_proba. A list or a Numpy array is expected."
)
@ignore_warnings(category=FutureWarning)
def check_classifiers_multilabel_output_format_decision_function(name, classifier_orig):
"""Check the output of the `decision_function` method for classifiers supporting
multilabel-indicator targets."""
classifier = clone(classifier_orig)
set_random_state(classifier)
n_samples, test_size, n_outputs = 100, 25, 5
X, y = make_multilabel_classification(
n_samples=n_samples,
n_features=2,
n_classes=n_outputs,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, X_test = X[:-test_size], X[-test_size:]
y_train = y[:-test_size]
classifier.fit(X_train, y_train)
response_method_name = "decision_function"
decision_function_method = getattr(classifier, response_method_name, None)
if decision_function_method is None:
raise SkipTest(f"{name} does not have a {response_method_name} method.")
y_pred = decision_function_method(X_test)
# y_pred.shape -> y_test.shape with floating dtype
assert isinstance(y_pred, np.ndarray), (
f"{name}.decision_function is expected to output a NumPy array."
f" Got {type(y_pred)} instead."
)
assert y_pred.shape == (test_size, n_outputs), (
f"{name}.decision_function is expected to provide a NumPy array "
f"of shape (n_samples, n_outputs). Got {y_pred.shape} instead of "
f"{(test_size, n_outputs)}."
)
assert y_pred.dtype.kind == "f", (
f"{name}.decision_function is expected to output a floating dtype."
f" Got {y_pred.dtype} instead."
)
@ignore_warnings(category=FutureWarning)
def check_estimators_fit_returns_self(name, estimator_orig, readonly_memmap=False):
"""Check if self is returned when calling fit."""
X, y = make_blobs(random_state=0, n_samples=21)
# some want non-negative input
X -= X.min()
X = _pairwise_estimator_convert_X(X, estimator_orig)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if readonly_memmap:
X, y = create_memmap_backed_data([X, y])
set_random_state(estimator)
assert estimator.fit(X, y) is estimator
@ignore_warnings
def check_estimators_unfitted(name, estimator_orig):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise a NotFittedError.
"""
# Common test for Regressors, Classifiers and Outlier detection estimators
X, y = _regression_dataset()
estimator = clone(estimator_orig)
for method in (
"decision_function",
"predict",
"predict_proba",
"predict_log_proba",
):
if hasattr(estimator, method):
with raises(NotFittedError):
getattr(estimator, method)(X)
@ignore_warnings(category=FutureWarning)
def check_supervised_y_2d(name, estimator_orig):
tags = _safe_tags(estimator_orig)
rnd = np.random.RandomState(0)
n_samples = 30
X = _pairwise_estimator_convert_X(rnd.uniform(size=(n_samples, 3)), estimator_orig)
y = np.arange(n_samples) % 3
y = _enforce_estimator_tags_y(estimator_orig, y)
estimator = clone(estimator_orig)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % ", ".join(
[str(w_x) for w_x in w]
)
if not tags["multioutput"]:
# check that we warned if we don't support multi-output
assert len(w) > 0, msg
assert (
"DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected"
in msg
)
assert_allclose(y_pred.ravel(), y_pred_2d.ravel())
@ignore_warnings
def check_classifiers_predictions(X, y, name, classifier_orig):
classes = np.unique(y)
classifier = clone(classifier_orig)
if name == "BernoulliNB":
X = X > X.mean()
set_random_state(classifier)
classifier.fit(X, y)
y_pred = classifier.predict(X)
if hasattr(classifier, "decision_function"):
decision = classifier.decision_function(X)
assert isinstance(decision, np.ndarray)
if len(classes) == 2:
dec_pred = (decision.ravel() > 0).astype(int)
dec_exp = classifier.classes_[dec_pred]
assert_array_equal(
dec_exp,
y_pred,
err_msg=(
"decision_function does not match "
"classifier for %r: expected '%s', got '%s'"
)
% (
classifier,
", ".join(map(str, dec_exp)),
", ".join(map(str, y_pred)),
),
)
elif getattr(classifier, "decision_function_shape", "ovr") == "ovr":
decision_y = np.argmax(decision, axis=1).astype(int)
y_exp = classifier.classes_[decision_y]
assert_array_equal(
y_exp,
y_pred,
err_msg=(
"decision_function does not match "
"classifier for %r: expected '%s', got '%s'"
)
% (classifier, ", ".join(map(str, y_exp)), ", ".join(map(str, y_pred))),
)
# training set performance
if name != "ComplementNB":
# This is a pathological data set for ComplementNB.
# For some specific cases 'ComplementNB' predicts less classes
# than expected
assert_array_equal(np.unique(y), np.unique(y_pred))
assert_array_equal(
classes,
classifier.classes_,
err_msg="Unexpected classes_ attribute for %r: expected '%s', got '%s'"
% (
classifier,
", ".join(map(str, classes)),
", ".join(map(str, classifier.classes_)),
),
)
def _choose_check_classifiers_labels(name, y, y_names):
# Semisupervised classifiers use -1 as the indicator for an unlabeled
# sample.
return (
y
if name in ["LabelPropagation", "LabelSpreading", "SelfTrainingClassifier"]
else y_names
)
def check_classifiers_classes(name, classifier_orig):
X_multiclass, y_multiclass = make_blobs(
n_samples=30, random_state=0, cluster_std=0.1
)
X_multiclass, y_multiclass = shuffle(X_multiclass, y_multiclass, random_state=7)
X_multiclass = StandardScaler().fit_transform(X_multiclass)
# We need to make sure that we have non negative data, for things
# like NMF
X_multiclass -= X_multiclass.min() - 0.1
X_binary = X_multiclass[y_multiclass != 2]
y_binary = y_multiclass[y_multiclass != 2]
X_multiclass = _pairwise_estimator_convert_X(X_multiclass, classifier_orig)
X_binary = _pairwise_estimator_convert_X(X_binary, classifier_orig)
labels_multiclass = ["one", "two", "three"]
labels_binary = ["one", "two"]
y_names_multiclass = np.take(labels_multiclass, y_multiclass)
y_names_binary = np.take(labels_binary, y_binary)
problems = [(X_binary, y_binary, y_names_binary)]
if not _safe_tags(classifier_orig, key="binary_only"):
problems.append((X_multiclass, y_multiclass, y_names_multiclass))
for X, y, y_names in problems:
for y_names_i in [y_names, y_names.astype("O")]:
y_ = _choose_check_classifiers_labels(name, y, y_names_i)
check_classifiers_predictions(X, y_, name, classifier_orig)
labels_binary = [-1, 1]
y_names_binary = np.take(labels_binary, y_binary)
y_binary = _choose_check_classifiers_labels(name, y_binary, y_names_binary)
check_classifiers_predictions(X_binary, y_binary, name, classifier_orig)
@ignore_warnings(category=FutureWarning)
def check_regressors_int(name, regressor_orig):
X, _ = _regression_dataset()
X = _pairwise_estimator_convert_X(X[:50], regressor_orig)
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = _enforce_estimator_tags_y(regressor_orig, y)
rnd = np.random.RandomState(0)
# separate estimators to control random seeds
regressor_1 = clone(regressor_orig)
regressor_2 = clone(regressor_orig)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(float))
pred2 = regressor_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
@ignore_warnings(category=FutureWarning)
def check_regressors_train(
name, regressor_orig, readonly_memmap=False, X_dtype=np.float64
):
X, y = _regression_dataset()
X = X.astype(X_dtype)
X = _pairwise_estimator_convert_X(X, regressor_orig)
y = scale(y) # X is already scaled
regressor = clone(regressor_orig)
y = _enforce_estimator_tags_y(regressor, y)
if name in CROSS_DECOMPOSITION:
rnd = np.random.RandomState(0)
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
if readonly_memmap:
        X, y, y_ = create_memmap_backed_data([X, y, y_])
if not hasattr(regressor, "alphas") and hasattr(regressor, "alpha"):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == "PassiveAggressiveRegressor":
regressor.C = 0.01
# raises error on malformed input for fit
with raises(
ValueError,
err_msg=(
f"The classifier {name} does not raise an error when "
"incorrect/malformed input data for fit is passed. The number of "
"training examples is not the same as the number of labels. Perhaps "
"use check_X_y in fit."
),
):
regressor.fit(X, y[:-1])
# fit
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert y_pred.shape == y_.shape
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if not _safe_tags(regressor, key="poor_score"):
assert regressor.score(X, y_) > 0.5
@ignore_warnings
def check_regressors_no_decision_function(name, regressor_orig):
# check that regressors don't have a decision_function, predict_proba, or
# predict_log_proba method.
rng = np.random.RandomState(0)
regressor = clone(regressor_orig)
X = rng.normal(size=(10, 4))
X = _pairwise_estimator_convert_X(X, regressor_orig)
y = _enforce_estimator_tags_y(regressor, X[:, 0])
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
assert not hasattr(regressor, func_name)
@ignore_warnings(category=FutureWarning)
def check_class_weight_classifiers(name, classifier_orig):
if _safe_tags(classifier_orig, key="binary_only"):
problems = [2]
else:
problems = [2, 3]
for n_centers in problems:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0
)
# can't use gram_if_pairwise() here, setting up gram matrix manually
if _is_pairwise(classifier_orig):
X_test = rbf_kernel(X_test, X_train)
X_train = rbf_kernel(X_train, X_train)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
classifier = clone(classifier_orig).set_params(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
if hasattr(classifier, "n_iter_no_change"):
classifier.set_params(n_iter_no_change=20)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# XXX: Generally can use 0.89 here. On Windows, LinearSVC gets
# 0.88 (Issue #9111)
if not _safe_tags(classifier_orig, key="poor_score"):
assert np.mean(y_pred == 0) > 0.87
@ignore_warnings(category=FutureWarning)
def check_class_weight_balanced_classifiers(
name, classifier_orig, X_train, y_train, X_test, y_test, weights
):
classifier = clone(classifier_orig)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight="balanced")
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert f1_score(y_test, y_pred_balanced, average="weighted") > f1_score(
y_test, y_pred, average="weighted"
)
@ignore_warnings(category=FutureWarning)
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
# this is run on classes, not instances, though this should be changed
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset, default n_iter are likely to prevent
# convergence
classifier.set_params(n_iter=1000)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
if hasattr(classifier, "cv"):
classifier.set_params(cv=3)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight="balanced")
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {
1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes),
}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_allclose(
coef_balanced,
coef_manual,
err_msg="Classifier %s is not computing class_weight=balanced properly." % name,
)
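# Worked example of the manual reweighting above (illustrative only): the
# 5-sample dataset in this check has three +1 labels and two -1 labels, so
# n_samples / (count * n_classes) gives
#   class_weight[1]  = 5 / (3 * 2) ~= 0.833
#   class_weight[-1] = 5 / (2 * 2) == 1.25
# which is exactly what class_weight="balanced" computes internally.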
@ignore_warnings(category=FutureWarning)
def check_estimators_overwrite_params(name, estimator_orig):
X, y = make_blobs(random_state=0, n_samples=21)
# some want non-negative input
X -= X.min()
X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert joblib.hash(new_value) == joblib.hash(original_value), (
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value)
)
@ignore_warnings(category=FutureWarning)
def check_no_attributes_set_in_init(name, estimator_orig):
"""Check setting during init."""
try:
# Clone fails if the estimator does not store
# all parameters as an attribute during init
estimator = clone(estimator_orig)
except AttributeError:
raise AttributeError(
f"Estimator {name} should store all parameters as an attribute during init."
)
if hasattr(type(estimator).__init__, "deprecated_original"):
return
init_params = _get_args(type(estimator).__init__)
if IS_PYPY:
# __init__ signature has additional objects in PyPy
for key in ["obj"]:
if key in init_params:
init_params.remove(key)
parents_init_params = [
param
for params_parent in (_get_args(parent) for parent in type(estimator).__mro__)
for param in params_parent
]
# Test for no setting apart from parameters during init
invalid_attr = set(vars(estimator)) - set(init_params) - set(parents_init_params)
assert not invalid_attr, (
"Estimator %s should not set any attribute apart"
" from parameters during init. Found attributes %s."
% (name, sorted(invalid_attr))
)
@ignore_warnings(category=FutureWarning)
def check_sparsify_coefficients(name, estimator_orig):
X = np.array(
[
[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-1, -2],
[2, 2],
[-2, -2],
]
)
y = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
y = _enforce_estimator_tags_y(estimator_orig, y)
est = clone(estimator_orig)
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert sparse.issparse(est.coef_)
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert sparse.issparse(est.coef_)
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
@ignore_warnings(category=FutureWarning)
def check_classifier_data_not_an_array(name, estimator_orig):
X = np.array(
[
[3, 0],
[0, 1],
[0, 2],
[1, 1],
[1, 2],
[2, 1],
[0, 3],
[1, 0],
[2, 0],
[4, 4],
[2, 3],
[3, 2],
]
)
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = np.array([1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2])
y = _enforce_estimator_tags_y(estimator_orig, y)
for obj_type in ["NotAnArray", "PandasDataframe"]:
check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type)
@ignore_warnings(category=FutureWarning)
def check_regressor_data_not_an_array(name, estimator_orig):
X, y = _regression_dataset()
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = _enforce_estimator_tags_y(estimator_orig, y)
for obj_type in ["NotAnArray", "PandasDataframe"]:
check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type)
@ignore_warnings(category=FutureWarning)
def check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type):
if name in CROSS_DECOMPOSITION:
raise SkipTest(
"Skipping check_estimators_data_not_an_array "
"for cross decomposition module as estimators "
"are not deterministic."
)
# separate estimators to control random seeds
estimator_1 = clone(estimator_orig)
estimator_2 = clone(estimator_orig)
set_random_state(estimator_1)
set_random_state(estimator_2)
if obj_type not in ["NotAnArray", "PandasDataframe"]:
raise ValueError("Data type {0} not supported".format(obj_type))
if obj_type == "NotAnArray":
y_ = _NotAnArray(np.asarray(y))
X_ = _NotAnArray(np.asarray(X))
else:
# Here pandas objects (Series and DataFrame) are tested explicitly
# because some estimators may handle them (especially their indexing)
# specially.
try:
import pandas as pd
y_ = np.asarray(y)
if y_.ndim == 1:
y_ = pd.Series(y_)
else:
y_ = pd.DataFrame(y_)
X_ = pd.DataFrame(np.asarray(X))
except ImportError:
raise SkipTest(
"pandas is not installed: not checking estimators for pandas objects."
)
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
def check_parameters_default_constructible(name, Estimator):
# test default-constructibility
# get rid of deprecation warnings
Estimator = Estimator.__class__
with ignore_warnings(category=FutureWarning):
estimator = _construct_instance(Estimator)
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert estimator.set_params() is estimator
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, "deprecated_original", estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator."""
return (
p.name != "self"
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL
)
init_params = [
p for p in signature(init).parameters.values() if param_filter(p)
]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
# they can need a non-default argument
init_params = init_params[len(getattr(estimator, "_required_parameters", [])) :]
for init_param in init_params:
assert (
init_param.default != init_param.empty
), "parameter %s for %s has no default value" % (
init_param.name,
type(estimator).__name__,
)
allowed_types = {
str,
int,
float,
bool,
tuple,
type(None),
type,
types.FunctionType,
joblib.Memory,
}
# Any numpy numeric such as np.int32.
allowed_types.update(np.core.numerictypes.allTypes.values())
assert type(init_param.default) in allowed_types, (
f"Parameter '{init_param.name}' of estimator "
f"'{Estimator.__name__}' is of type "
f"{type(init_param.default).__name__} which is not "
"allowed. All init parameters have to be immutable to "
"make cloning possible. Therefore we restrict the set of "
"legal types to "
f"{set(type.__name__ for type in allowed_types)}."
)
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert init_param.default is None, (
f"Estimator parameter '{init_param.name}' of estimator "
f"'{Estimator.__name__}' is not returned by get_params. "
"If it is deprecated, set its default value to None."
)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
failure_text = (
f"Parameter {init_param.name} was mutated on init. All "
"parameters must be stored unchanged."
)
if is_scalar_nan(param_value):
# Allows to set default parameters to np.nan
assert param_value is init_param.default, failure_text
else:
assert param_value == init_param.default, failure_text
def _enforce_estimator_tags_y(estimator, y):
# Estimators with a `requires_positive_y` tag only accept strictly positive
# data
if _safe_tags(estimator, key="requires_positive_y"):
# Create strictly positive y. The minimal increment above 0 is 1, as
# y could be of integer dtype.
y += 1 + abs(y.min())
# Estimators with a `binary_only` tag only accept up to two unique y values
if _safe_tags(estimator, key="binary_only") and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
# Estimators in mono_output_task_error raise ValueError if y is of 1-D
# Convert into a 2-D y for those estimators.
if _safe_tags(estimator, key="multioutput_only"):
return np.reshape(y, (-1, 1))
return y
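# Illustrative sketch of the tag handling above (each transformation shown
# independently on a hypothetical y, not part of the original module):
#   y = np.array([-2, 0, 3])
#   requires_positive_y -> y + 1 + abs(y.min()) == [1, 3, 6]  (strictly positive)
#   binary_only         -> np.where(y == y.flat[0], y, y.flat[0] + 1) leaves at
#                          most two unique values, here [-2, -1, -1]
#   multioutput_only    -> y is reshaped to a column vector of shape (3, 1)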
def _enforce_estimator_tags_x(estimator, X):
# Pairwise estimators only accept
# X of shape (`n_samples`, `n_samples`)
if _is_pairwise(estimator):
X = X.dot(X.T)
# Estimators with `1darray` in `X_types` tag only accept
# X of shape (`n_samples`,)
if "1darray" in _safe_tags(estimator, key="X_types"):
X = X[:, 0]
# Estimators with a `requires_positive_X` tag only accept
# strictly positive data
if _safe_tags(estimator, key="requires_positive_X"):
X -= X.min()
if "categorical" in _safe_tags(estimator, key="X_types"):
X = (X - X.min()).astype(np.int32)
return X
@ignore_warnings(category=FutureWarning)
def check_non_transformer_estimators_n_iter(name, estimator_orig):
# Test that estimators that are not transformers with a parameter
# max_iter, return the attribute of n_iter_ at least 1.
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
# SelfTrainingClassifier does not perform an iteration if all samples are
# labeled, hence n_iter_ = 0 is valid.
not_run_check_n_iter = [
"Ridge",
"RidgeClassifier",
"RandomizedLasso",
"LogisticRegressionCV",
"LinearSVC",
"LogisticRegression",
"SelfTrainingClassifier",
]
# Tested in test_transformer_n_iter
not_run_check_n_iter += CROSS_DECOMPOSITION
if name in not_run_check_n_iter:
return
    # LassoLars stops early for the default alpha=1.0 on the iris dataset.
if name == "LassoLars":
estimator = clone(estimator_orig).set_params(alpha=0.0)
else:
estimator = clone(estimator_orig)
if hasattr(estimator, "max_iter"):
iris = load_iris()
X, y_ = iris.data, iris.target
y_ = _enforce_estimator_tags_y(estimator, y_)
set_random_state(estimator, 0)
X = _pairwise_estimator_convert_X(X, estimator_orig)
estimator.fit(X, y_)
assert np.all(estimator.n_iter_ >= 1)
@ignore_warnings(category=FutureWarning)
def check_transformer_n_iter(name, estimator_orig):
# Test that transformers with a parameter max_iter, return the
# attribute of n_iter_ at least 1.
estimator = clone(estimator_orig)
if hasattr(estimator, "max_iter"):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [2.0, 5.0, 4.0]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert iter_ >= 1
else:
assert estimator.n_iter_ >= 1
@ignore_warnings(category=FutureWarning)
def check_get_params_invariance(name, estimator_orig):
# Checks if get_params(deep=False) is a subset of get_params(deep=True)
e = clone(estimator_orig)
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert all(item in deep_params.items() for item in shallow_params.items())
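# Illustrative note (not from the original module): for a composite estimator
# such as Pipeline([("scaler", StandardScaler()), ("svc", SVC(C=2.0))]),
# get_params(deep=False) holds only the top-level parameters (e.g. "steps"),
# while get_params(deep=True) additionally exposes the nested ones such as
# "svc__C".  The check above asserts that the shallow dict is always a subset
# of the deep one.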
@ignore_warnings(category=FutureWarning)
def check_set_params(name, estimator_orig):
# Check that get_params() returns the same thing
# before and after set_params() with some fuzz
estimator = clone(estimator_orig)
orig_params = estimator.get_params(deep=False)
msg = "get_params result does not match what was passed to set_params"
estimator.set_params(**orig_params)
curr_params = estimator.get_params(deep=False)
assert set(orig_params.keys()) == set(curr_params.keys()), msg
for k, v in curr_params.items():
assert orig_params[k] is v, msg
# some fuzz values
test_values = [-np.inf, np.inf, None]
test_params = deepcopy(orig_params)
for param_name in orig_params.keys():
default_value = orig_params[param_name]
for value in test_values:
test_params[param_name] = value
try:
estimator.set_params(**test_params)
except (TypeError, ValueError) as e:
e_type = e.__class__.__name__
# Exception occurred, possibly parameter validation
warnings.warn(
"{0} occurred during set_params of param {1} on "
"{2}. It is recommended to delay parameter "
"validation until fit.".format(e_type, param_name, name)
)
change_warning_msg = (
"Estimator's parameters changed after set_params raised {}".format(
e_type
)
)
params_before_exception = curr_params
curr_params = estimator.get_params(deep=False)
try:
assert set(params_before_exception.keys()) == set(
curr_params.keys()
)
for k, v in curr_params.items():
assert params_before_exception[k] is v
except AssertionError:
warnings.warn(change_warning_msg)
else:
curr_params = estimator.get_params(deep=False)
assert set(test_params.keys()) == set(curr_params.keys()), msg
for k, v in curr_params.items():
assert test_params[k] is v, msg
test_params[param_name] = default_value
@ignore_warnings(category=FutureWarning)
def check_classifiers_regression_target(name, estimator_orig):
# Check if classifier throws an exception when fed regression targets
X, y = _regression_dataset()
X = X + 1 + abs(X.min(axis=0)) # be sure that X is non-negative
e = clone(estimator_orig)
msg = "Unknown label type: "
if not _safe_tags(e, key="no_validation"):
with raises(ValueError, match=msg):
e.fit(X, y)
@ignore_warnings(category=FutureWarning)
def check_decision_proba_consistency(name, estimator_orig):
# Check whether an estimator having both decision_function and
# predict_proba methods has outputs with perfect rank correlation.
centers = [(2, 2), (4, 4)]
X, y = make_blobs(
n_samples=100,
random_state=0,
n_features=4,
centers=centers,
cluster_std=1.0,
shuffle=True,
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=0
)
estimator = clone(estimator_orig)
if hasattr(estimator, "decision_function") and hasattr(estimator, "predict_proba"):
estimator.fit(X_train, y_train)
# Since the link function from decision_function() to predict_proba()
# is sometimes not precise enough (typically expit), we round to the
# 10th decimal to avoid numerical issues: we compare the rank
# with deterministic ties rather than get platform specific rank
# inversions in case of machine level differences.
a = estimator.predict_proba(X_test)[:, 1].round(decimals=10)
b = estimator.decision_function(X_test).round(decimals=10)
assert_array_equal(rankdata(a), rankdata(b))
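# Illustrative note (not part of the original checks): scipy's rankdata assigns
# 1-based ranks with ties averaged, e.g. rankdata([0.1, 0.9, 0.5]) == [1., 3., 2.],
# so the assertion above only requires that predict_proba and decision_function
# order the samples identically, not that their values agree.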
def check_outliers_fit_predict(name, estimator_orig):
# Check fit_predict for outlier detectors.
n_samples = 300
X, _ = make_blobs(n_samples=n_samples, random_state=0)
X = shuffle(X, random_state=7)
n_samples, n_features = X.shape
estimator = clone(estimator_orig)
set_random_state(estimator)
y_pred = estimator.fit_predict(X)
assert y_pred.shape == (n_samples,)
assert y_pred.dtype.kind == "i"
assert_array_equal(np.unique(y_pred), np.array([-1, 1]))
# check fit_predict = fit.predict when the estimator has both a predict and
# a fit_predict method. recall that it is already assumed here that the
# estimator has a fit_predict method
if hasattr(estimator, "predict"):
y_pred_2 = estimator.fit(X).predict(X)
assert_array_equal(y_pred, y_pred_2)
if hasattr(estimator, "contamination"):
# proportion of outliers equal to contamination parameter when not
# set to 'auto'
expected_outliers = 30
contamination = float(expected_outliers) / n_samples
estimator.set_params(contamination=contamination)
y_pred = estimator.fit_predict(X)
num_outliers = np.sum(y_pred != 1)
# num_outliers should be equal to expected_outliers unless
# there are ties in the decision_function values. this can
# only be tested for estimators with a decision_function
# method
if num_outliers != expected_outliers and hasattr(
estimator, "decision_function"
):
decision = estimator.decision_function(X)
check_outlier_corruption(num_outliers, expected_outliers, decision)
# raises error when contamination is a scalar and not in [0,1]
msg = r"contamination must be in \(0, 0.5]"
for contamination in [-0.5, -0.001, 0.5001, 2.3]:
estimator.set_params(contamination=contamination)
with raises(ValueError, match=msg):
estimator.fit_predict(X)
def check_fit_non_negative(name, estimator_orig):
    # Check that a ValueError is raised for negative X
    # when the requires_positive_X tag is present
X = np.array([[-1.0, 1], [-1.0, 1]])
y = np.array([1, 2])
estimator = clone(estimator_orig)
with raises(ValueError):
estimator.fit(X, y)
def check_fit_idempotent(name, estimator_orig):
# Check that est.fit(X) is the same as est.fit(X).fit(X). Ideally we would
# check that the estimated parameters during training (e.g. coefs_) are
# the same, but having a universal comparison function for those
# attributes is difficult and full of edge cases. So instead we check that
# predict(), predict_proba(), decision_function() and transform() return
# the same results.
check_methods = ["predict", "transform", "decision_function", "predict_proba"]
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
if "warm_start" in estimator.get_params().keys():
estimator.set_params(warm_start=False)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
X = _pairwise_estimator_convert_X(X, estimator)
if is_regressor(estimator_orig):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = _enforce_estimator_tags_y(estimator, y)
train, test = next(ShuffleSplit(test_size=0.2, random_state=rng).split(X))
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
# Fit for the first time
estimator.fit(X_train, y_train)
result = {
method: getattr(estimator, method)(X_test)
for method in check_methods
if hasattr(estimator, method)
}
# Fit again
set_random_state(estimator)
estimator.fit(X_train, y_train)
for method in check_methods:
if hasattr(estimator, method):
new_result = getattr(estimator, method)(X_test)
if np.issubdtype(new_result.dtype, np.floating):
tol = 2 * np.finfo(new_result.dtype).eps
else:
tol = 2 * np.finfo(np.float64).eps
assert_allclose_dense_sparse(
result[method],
new_result,
atol=max(tol, 1e-9),
rtol=max(tol, 1e-7),
err_msg="Idempotency check failed for method {}".format(method),
)
def check_fit_check_is_fitted(name, estimator_orig):
    # Make sure that the estimator doesn't pass check_is_fitted before calling
    # fit and that it passes check_is_fitted once it's fit.
rng = np.random.RandomState(42)
estimator = clone(estimator_orig)
set_random_state(estimator)
if "warm_start" in estimator.get_params():
estimator.set_params(warm_start=False)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
X = _pairwise_estimator_convert_X(X, estimator)
if is_regressor(estimator_orig):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = _enforce_estimator_tags_y(estimator, y)
if not _safe_tags(estimator).get("stateless", False):
# stateless estimators (such as FunctionTransformer) are always "fit"!
try:
check_is_fitted(estimator)
raise AssertionError(
f"{estimator.__class__.__name__} passes check_is_fitted before being"
" fit!"
)
except NotFittedError:
pass
estimator.fit(X, y)
try:
check_is_fitted(estimator)
except NotFittedError as e:
raise NotFittedError(
"Estimator fails to pass `check_is_fitted` even though it has been fit."
) from e
def check_n_features_in(name, estimator_orig):
# Make sure that n_features_in_ attribute doesn't exist until fit is
# called, and that its value is correct.
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
if "warm_start" in estimator.get_params():
estimator.set_params(warm_start=False)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
X = _pairwise_estimator_convert_X(X, estimator)
if is_regressor(estimator_orig):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = _enforce_estimator_tags_y(estimator, y)
assert not hasattr(estimator, "n_features_in_")
estimator.fit(X, y)
if hasattr(estimator, "n_features_in_"):
assert estimator.n_features_in_ == X.shape[1]
else:
warnings.warn(
"As of scikit-learn 0.23, estimators should expose a "
"n_features_in_ attribute, unless the 'no_validation' tag is "
"True. This attribute should be equal to the number of features "
"passed to the fit method. "
"An error will be raised from version 1.0 (renaming of 0.25) "
"when calling check_estimator(). "
"See SLEP010: "
"https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep010/proposal.html", # noqa
FutureWarning,
)
def check_requires_y_none(name, estimator_orig):
# Make sure that an estimator with requires_y=True fails gracefully when
# given y=None
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
X = _pairwise_estimator_convert_X(X, estimator)
warning_msg = (
"As of scikit-learn 0.23, estimators should have a "
"'requires_y' tag set to the appropriate value. "
"The default value of the tag is False. "
"An error will be raised from version 1.0 when calling "
"check_estimator() if the tag isn't properly set."
)
expected_err_msgs = (
"requires y to be passed, but the target y is None",
"Expected array-like (array or non-string sequence), got None",
"y should be a 1d array",
)
try:
estimator.fit(X, None)
except ValueError as ve:
if not any(msg in str(ve) for msg in expected_err_msgs):
warnings.warn(warning_msg, FutureWarning)
@ignore_warnings(category=FutureWarning)
def check_n_features_in_after_fitting(name, estimator_orig):
# Make sure that n_features_in are checked after fitting
tags = _safe_tags(estimator_orig)
is_supported_X_types = (
"2darray" in tags["X_types"] or "categorical" in tags["X_types"]
)
if not is_supported_X_types or tags["no_validation"]:
return
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
if "warm_start" in estimator.get_params():
estimator.set_params(warm_start=False)
n_samples = 150
X = rng.normal(size=(n_samples, 8))
X = _enforce_estimator_tags_x(estimator, X)
X = _pairwise_estimator_convert_X(X, estimator)
if is_regressor(estimator):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = _enforce_estimator_tags_y(estimator, y)
estimator.fit(X, y)
assert estimator.n_features_in_ == X.shape[1]
# check methods will check n_features_in_
check_methods = [
"predict",
"transform",
"decision_function",
"predict_proba",
"score",
]
X_bad = X[:, [1]]
msg = f"X has 1 features, but \\w+ is expecting {X.shape[1]} features as input"
for method in check_methods:
if not hasattr(estimator, method):
continue
callable_method = getattr(estimator, method)
if method == "score":
callable_method = partial(callable_method, y=y)
with raises(ValueError, match=msg):
callable_method(X_bad)
# partial_fit will check in the second call
if not hasattr(estimator, "partial_fit"):
return
estimator = clone(estimator_orig)
if is_classifier(estimator):
estimator.partial_fit(X, y, classes=np.unique(y))
else:
estimator.partial_fit(X, y)
assert estimator.n_features_in_ == X.shape[1]
with raises(ValueError, match=msg):
estimator.partial_fit(X_bad, y)
def check_estimator_get_tags_default_keys(name, estimator_orig):
# check that if _get_tags is implemented, it contains all keys from
# _DEFAULT_KEYS
estimator = clone(estimator_orig)
if not hasattr(estimator, "__sklearn_tags__"):
return
tags_keys = set(estimator.__sklearn_tags__().keys())
default_tags_keys = set(_DEFAULT_TAGS.keys())
assert tags_keys.intersection(default_tags_keys) == default_tags_keys, (
f"{name}.__sklearn_tags__() is missing entries for the following default tags"
f": {default_tags_keys - tags_keys.intersection(default_tags_keys)}"
)
def check_dataframe_column_names_consistency(name, estimator_orig):
try:
import pandas as pd
except ImportError:
raise SkipTest(
"pandas is not installed: not checking column name consistency for pandas"
)
tags = _safe_tags(estimator_orig)
is_supported_X_types = (
"2darray" in tags["X_types"] or "categorical" in tags["X_types"]
)
if not is_supported_X_types or tags["no_validation"]:
return
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
X_orig = rng.normal(size=(150, 8))
# Some picky estimators (e.g. SkewedChi2Sampler) only accept skewed positive data.
X_orig -= X_orig.min() + 0.5
X_orig = _enforce_estimator_tags_x(estimator, X_orig)
X_orig = _pairwise_estimator_convert_X(X_orig, estimator)
n_samples, n_features = X_orig.shape
names = np.array([f"col_{i}" for i in range(n_features)])
X = | pd.DataFrame(X_orig, columns=names) | pandas.DataFrame |
"""
this script is meant to assess a dataset along a variety of measures
author: <NAME>
license: MIT
"""
# standard libary
import argparse
from collections import Counter, defaultdict, OrderedDict
import csv
from functools import partial
import json
import os
import re
from typing import List
# third party libraries
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
import numpy as np
import pandas as pd
# project libraries
from speech.dataset_info import AllDatasets, TatoebaDataset
from speech.utils.data_helpers import (
get_record_ids_map, get_dataset_ids, path_to_id, process_text, today_date
)
from speech.utils.io import read_data_json, write_pickle
from speech.utils.visual import plot_count, print_stats, print_symmetric_table
def assess_commonvoice(validated_path:str, max_occurance:int):
    val_df = pd.read_csv(validated_path, delimiter='\t', encoding='utf-8')
print(f"there are {val_df.shape[0]} entries/rows in the dataset")
accents=["us", "canada"]
# 231011 rows with accents "us" and "canada", 206653 with us and 24358 with canada
val_df = val_df[val_df.accent.isin(accents)]
print(f"there are {val_df.shape[0]} entries with accents {accents}")
# create vote_diff column to sort the sentences upon
val_df["vote_diff"] = val_df.up_votes - val_df.down_votes
    # remove punctuation and lower-case the sentences
    val_df['sentence'] = val_df['sentence'].str.replace(r'[^\w\s]', '').str.lower()
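    # e.g. (illustrative) "Hello, world!" -> "hello world": the pattern r'[^\w\s]'
    # strips every character that is neither a word character nor whitespace
    # before lower-casing.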
# sorts by the number of unique utterances in descending order
val_df.sentence.value_counts(sort=True, ascending=False)
# histogram bins
#pd.cut(val_df.sentence.value_counts(sort=True, ascending=False),bin_range).value_counts().sort_index()
# dictionary of frequency counts
count_dict=val_df.sentence.value_counts(sort=True, ascending=False).to_dict()
    # filter so that each utterance occurs at most max_occurance times,
    # keeping the utterances with the highest difference between up_votes and down_votes
val_df, drop_row_count = filter_by_count(val_df, count_dict, max_occurance)
print(f"number of rows dropped: {drop_row_count}")
dirname = os.path.dirname(validated_path)
write_path = os.path.join(dirname, f"validated-{max_occurance}-maxrepeat.tsv")
if os.path.exists(write_path):
print(f"file: {write_path} already exists.")
print("Would you like to rewrite it? y/n")
answer = input()
if answer in ["Y", "y"]:
val_df.to_csv(write_path, sep="\t", index=False)
print(f"file: {write_path} successfully saved")
else:
print("file has not be overwritten. No new file saved")
else:
val_df.to_csv(write_path, sep="\t", index=False)
print(f"file: {write_path} successfully saved")
def filter_by_count(in_df:pd.DataFrame, count_dict:dict, filter_value:int):
"""
    filters the dataframe so that sentences that occur more frequently than
    filter_value are reduced to a number of occurrences equal to filter_value.
    The rows to keep are chosen by the largest difference between up_votes and down_votes.
"""
drop_row_count = 0
for sentence, count in count_dict.items():
if count > filter_value:
# selecting rows that equal sentence
# then sorting that subset by the vote_diff value in descending order
            # then taking the indices of the rows after the first filter_value rows
drop_index = in_df[in_df.sentence.eq(sentence)]\
.sort_values("vote_diff", ascending=False)\
.iloc[filter_value:,:].index
drop_row_count += len(drop_index)
# dropping the rows in drop_index
in_df = in_df.drop(index=drop_index)
return in_df, drop_row_count
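# Example usage of filter_by_count (illustrative sketch, not part of the
# original script):
#   count_dict = {"how are you": 5, "good morning": 2}
#   filtered_df, n_dropped = filter_by_count(val_df, count_dict, filter_value=3)
# would keep only the 3 highest-vote_diff rows for "how are you" and leave
# "good morning" untouched, so n_dropped == 2 in this example.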
def assess_nsc_tags(transcript_dir:str)->None:
"""This function calculates a variety of statistics on the presence of non-speech
tags in the transcripts of the National Speech Corpus (NSC) dataset.
Arguments:
transcript_dir (str): path to the directory that contains all of the transcripts
    As a note, the transcripts are encoded using 'utf-8-sig', which carries the '\ufeff' byte order
    mark (BOM) that is normally used to distinguish big- and little-endian UTF-16 encodings.
"""
non_speech_tags = {'<FIL/>', '<SPK/>', '<STA/>', '<NON/>', '<NPS/>', '**'}
trans_dict = dict() # dictionary containing the transcripts
tags_dict = defaultdict(list) # dict record keeping of the non-speech tags
totals = {"words": 0, "lines": 0}
transcript_paths = os.listdir(transcript_dir)
transcript_paths.sort()
for path in transcript_paths:
path = os.path.join(transcript_dir, path)
with open(path, 'r', encoding='utf-8-sig') as fid:
for row in fid:
# clean and split the id and transcript
trans_id, trans = row.strip().split('\t')
                # each example has a lower-case transcript on a second line
# try-except prints the filepath if the second line is missing
try:
trans_lower = next(fid).strip()
except StopIteration:
print(f"file {path} is not have lower-case transcript")
raise StopIteration
# checks that the two transcripts are equal except for case and punctuation
#assert process_text(trans) == trans_lower, \
# f"{path}_{trans_id} transcript is not equal:\n1) {trans} \n2) {trans_lower}"
# records if non-speech-tags are in each line
for word in trans_lower.split(' '):
if word in non_speech_tags:
# records are formated as <path>_<id>
tags_dict[word].append(path+"_"+trans_id)
# increment the total word and line counts
totals['words'] += len(trans_lower)
totals['lines'] += 1
# tally up the non-speech tag counts
tags_tally = dict()
for tag, paths in tags_dict.items():
tags_tally[tag] = {
"total_tags": len(paths),
"tags_per_line": len(paths) / totals['lines'],
"tags_per_word": len(paths) / totals['words'],
"sample_lines": paths[:5]
}
# write the tags tally to json file
print(f"totals: {totals}")
out_file = os.path.join(
os.path.dirname(os.path.normpath(transcript_dir)),
f"tag_stats_{today_date()}.json"
)
with open(out_file, 'w') as fid:
json.dump(tags_tally, fid)
def assess_iphone_models(save_path:str)->None:
"""This function seeks to identify the distribution of iphone models across a random sample of
    Speak's userbase. A histogram will be created of the number of users on each iPhone model.
Args:
save_path (str): path where iphone count will be saved as pickle
"""
PROJECT_ID = 'speak-v2-2a1f1'
QUERY_LIMIT = 10000
    # verify and set the credentials
CREDENTIAL_PATH = "/home/dzubke/awni_speech/speak-v2-2a1f1-d8fc553a3437.json"
# CREDENTIAL_PATH = "/Users/dustin/CS/consulting/firstlayerai/phoneme_classification/src/awni_speech/speak-v2-2a1f1-d8fc553a3437.json"
    # set the environment variable that `firebase_admin.credentials` will use
os.putenv("GOOGLE_APPLICATION_CREDENTIALS", CREDENTIAL_PATH)
# initialize the credentials and firebase db client
cred = credentials.ApplicationDefault()
firebase_admin.initialize_app(cred, {'projectId': PROJECT_ID})
db = firestore.client()
rec_ref = db.collection(u'recordings')
iphone_model_count = Counter()
n_iphone_models = 100000
while sum(iphone_model_count.values()) < n_iphone_models:
print("inside while loop")
next_query = rec_ref.order_by(u'id').limit(QUERY_LIMIT)
for doc in next_query.stream():
doc = doc.to_dict()
# only select dates in 2020
rec_date = doc.get('info', {}).get('date', None)
if isinstance(rec_date, str):
if rec_date.startswith('2020'):
# get the iphone model
iphone_model = doc.get('user', {}).get('deviceModelIdentifier', None)
if iphone_model is not None:
                        # iphone_model has the format 'iPad8,2', so keep only the first half
iphone_model = iphone_model.split(',')[0]
iphone_model_count[iphone_model] += 1
#iphone_model_count = dict(iphone_model_count)
write_pickle(save_path, iphone_model_count)
# plot the iphone model counts
model_names, model_counts = list(zip(*iphone_model_count.most_common()))
plt.plot(model_names, model_counts)
plt.xticks(model_names, model_names, rotation=45)
fig, ax = plt.subplots(constrained_layout=True)
ax.bar(model_names, model_counts)
plt.xticks(model_names, model_names, rotation=45)
total = sum(model_counts)
# plot the aggregate and percent of total values on both axes
def _agg2percent_forward(x, total):
return x/total
def _agg2percent_backward(x, total):
return x*total
# create the forward and backward transforms for the axis
forward_transform = partial(_agg2percent_forward, total=total)
backward_transform = partial(_agg2percent_backward, total=total)
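    # e.g. (illustrative) with total = 50_000 devices, a count of 10_000 maps to
    # 0.2 (20%) via _agg2percent_forward and 0.2 maps back to 10_000 via
    # _agg2percent_backward; secondary_yaxis(functions=...) expects exactly such
    # a forward/inverse pair.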
# create the secondary axis
secaxy = ax.secondary_yaxis('right', functions=(forward_transform,
backward_transform))
# add the plot labels for each axis
ax.set_ylabel("Device model count")
secaxy.set_ylabel("Percent of total device count")
plt.xlabel("Device names")
def assess_speak_train(dataset_paths: List[str],
metadata_path:str,
out_dir:str,
use_json:bool=True)->None:
"""This function creates counts of the speaker, lesson, and line ids in a speak training dataset
Args:
dataset_path (str): path to speak training.json dataset
metadata_path (str): path to tsv file that contains speaker, line, and lesson ids
out_dir (str): directory where plots and txt files will be saved
use_json (bool): if true, the data will be read from a training.json file
Returns:
None
"""
def _increment_key(in_dict, key):
in_dict[key] = in_dict.get(key, 0) + 1
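    # _increment_key mirrors collections.Counter behaviour on a plain dict,
    # e.g. (illustrative) two calls with key "lessonA" leave {"lessonA": 2}.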
# this will read the data from a metadata.tsv file
if not use_json:
# count dictionaries for the lesssons, lines, and users (speakers)
lesson_dict, line_dict, user_dict, target_dict = {}, {}, {}, {}
# create count_dicts for each
with open(metadata_path, 'r') as tsv_file:
tsv_reader = csv.reader(tsv_file, delimiter='\t')
header = next(tsv_reader)
print(header)
for row in tsv_reader:
_increment_key(lesson_dict, row[2])
_increment_key(line_dict, row[3])
_increment_key(user_dict, row[4])
_increment_key(target_dict, process_text(row[1]))
        # put the labels and count_dicts in lists for the loop below
constraint_names = ['lesson', 'line', 'speaker', 'target_sent']
counter = {
"lesson": lesson_dict,
"line": line_dict,
"speaker": user_dict,
"target_sent": target_dict
}
# reading from a training.json file supported by a metadata.tsv file
if use_json:
# create mapping from record_id to speaker, line, and lesson ids
rec_ids_map = dict()
constraint_names = ['lesson', 'line', 'speaker', 'target_sent']
counter = {name: dict() for name in constraint_names}
with open(metadata_path, 'r') as tsv_file:
tsv_reader = csv.reader(tsv_file, delimiter='\t')
# header: id, text, lessonId, lineId, uid(speaker_id), date
header = next(tsv_reader)
rec_ids_map = dict()
for row in tsv_reader:
rec_ids_map[row[0]]= {
constraint_names[0]: row[2], # lesson
constraint_names[1]: row[3], # line
constraint_names[2]: row[4], # speaker
constraint_names[3]: process_text(row[1]), # target-sentence
"date": row[6] # date
}
total_date_counter = dict()
    # `unq_date_counter` keeps track of the unique ids seen per date
unq_date_counter = {name: dict() for name in constraint_names}
# iterate through the datasets
for dataset_path in dataset_paths:
dataset = read_data_json(dataset_path)
print(f"dataset {path_to_id(dataset_path)} size is: {len(dataset)}")
        # iterate through the examples in the dataset
for xmpl in dataset:
rec_id = path_to_id(xmpl['audio'])
date = rec_ids_map[rec_id]['date']
            # date has format 2020-09-10T04:24:03.073Z, so splitting on '-' and
            # joining the first two elements gives `2020-09`
yyyy_mm_date = '-'.join(date.split('-')[:2])
_increment_key(total_date_counter, yyyy_mm_date)
# iterate through the constraints and update the id counters
for name in constraint_names:
constraint_id = rec_ids_map[rec_id][name]
_increment_key(counter[name], constraint_id)
update_unq_date_counter(
unq_date_counter,
name,
constraint_id,
yyyy_mm_date
)
# create the plots
fig, axs = plt.subplots(1,len(constraint_names))
fig.set_size_inches(8, 6)
# plot and calculate stats of the count_dicts
for ax, name in zip(axs, constraint_names):
plot_count(ax, counter[name], name)
print(f"{name} stats")
print_stats(counter[name])
print()
# ensures the directory of `out_dir` exists
    os.makedirs(out_dir, exist_ok=True)
out_path = os.path.join(out_dir, os.path.basename(out_dir))
print("out_path: ", out_path)
plt.savefig(out_path + "_count_plot.png")
plt.close()
# plot the total_date histogram
fig, ax = plt.subplots(1,1)
dates = sorted(total_date_counter.keys())
date_counts = [total_date_counter[date] for date in dates]
ax.plot(range(len(date_counts)), date_counts)
plt.xticks(range(len(date_counts)), dates, rotation=60)
#ax.set_title(label)
#ax.set_xlabel(f"unique {label}")
#ax.set_ylabel(f"utterance per {label}")
#ax.xaxis.set_major_formatter(tick.FuncFormatter(reformat_large_tick_values));
ax.yaxis.set_major_formatter(tick.FuncFormatter(reformat_large_tick_values));
plt.tight_layout()
plt.savefig(out_path + "_date_count.png")
plt.close()
# plot the unique ids
for name in constraint_names:
fig, ax = plt.subplots(1,1)
date_counts = []
dates = sorted(unq_date_counter[name].keys())
total_count = sum([unq_date_counter[name][date]['count'] for date in dates])
cumulative_count = 0
for date in dates:
cumulative_count += unq_date_counter[name][date]['count']
date_counts.append(round(cumulative_count/total_count, 2))
ax.plot(range(len(date_counts)), date_counts)
plt.xticks(range(len(date_counts)), dates, rotation=60)
ax.set_title(name)
ax.set_xlabel(f"Date")
ax.set_ylabel(f"% of total unique ID's")
#ax.xaxis.set_major_formatter(tick.FuncFormatter(reformat_large_tick_values));
#ax.yaxis.set_major_formatter(tick.FuncFormatter(reformat_large_tick_values));
plt.tight_layout()
plt.savefig(out_path + f"_unq_cum_date_{name}.png")
plt.close()
# sort the lesson_ids and line_ids and write to txt file
for name in counter:
sorted_ids = sorted(list(counter[name].keys()))
with open(f"{out_path}_{name}.txt", 'w') as fid:
for ids in sorted_ids:
fid.write(ids+"\n")
#print("unique lessons")
#print(sorted(list(lesson_dict.keys()))[:200])
#print(f"number of unique lessons: {len(set(lesson_dict.keys()))}")
def dataset_stats(dataset_path:str)->None:
"""This function prints a variety of stats (like mean and std-dev) for the input dataset
Args:
dataset_path (str): path to the dataset
"""
dataset = read_data_json(dataset_path)
data_features = {
"target_len": [len(xmpl['text']) for xmpl in dataset],
"audio_dur": [xmpl['duration'] for xmpl in dataset]
}
stat_functions = {
"mean": np.mean,
"stddev": np.std,
}
print(f"stats for dataset: {os.path.basename(dataset_path)}")
for data_name, data in data_features.items():
for stat_name, stat_fn in stat_functions.items():
print(f"\t{stat_name} of {data_name} is: {round(stat_fn(data), 3)}")
print()
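# Illustrative sketch (not part of the original pipeline): the same stats as
# `dataset_stats`, computed on a small hypothetical in-memory dataset instead of
# a json file on disk. Assumes `np` is imported above, as dataset_stats already uses it.
def _demo_dataset_stats():
    toy_dataset = [
        {'text': 'hello there', 'duration': 1.2},
        {'text': 'hi', 'duration': 0.8},
    ]
    target_len = [len(xmpl['text']) for xmpl in toy_dataset]
    audio_dur = [xmpl['duration'] for xmpl in toy_dataset]
    print("mean of target_len is:", round(np.mean(target_len), 3))   # 6.5
    print("stddev of audio_dur is:", round(np.std(audio_dur), 3))    # 0.2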
def dataset_overlap(config_path:str)->None:
"""This function assess the overlap between two datasets by the `overlap_key`.
Two metrics are calcualted:
1) coutn of unique overlap_keys / total unique overlap_keys
2) count of total overlaping keys / total records
Config includes:
dataset_list (List[str]): list of dataset paths to compare
metadata_paths (List[str]): path to metadata tsv file
overlap_key (str): key to assess overlap (like speaker_id or target-sentence)
Returns:
None
"""
config = load_config(config_path)
dataset_list = config['dataset_list']
metadata_paths = config['metadata_paths']
overlap_key = config['overlap_key']
print("Arguments")
print(f"list of datasets: {dataset_list}")
print(f"metadata_paths: {metadata_paths}")
print(f"assessing overlap based on key: {overlap_key}")
# combine the record_ids_maps for each metadata path.
# this is necessary because the training metadata.tsv file is disjoint from the
# test and evaluation metadata.
record_ids_map = dict()
has_url_fn = lambda path: 'url' in path
for metadata_path in metadata_paths:
record_ids_map.update(
get_record_ids_map(metadata_path, has_url= has_url_fn(metadata_path))
)
# creates a shorter, pretty name of the dataset
def pretty_data_name(data_name):
"""This function makes the data name shorter and easier to read
"""
data_name = os.path.basename(data_name) # remove the path directories
data_name = os.path.splitext(data_name)[0] # removes extension
data_name = data_name.replace("speak-", "") # remove 'speak-'
data_name = data_name.replace("data_trim", "7M") # changes name for 7M records
data_name = data_name.replace("eval2_data", "eval2-v1") # change the eval2-v1 name
data_name = data_name.replace("_data", "") # removes _data from v4 and v5
data_name = re.sub(r'_2020-..-..', '',data_name) # removes date
return data_name
data_dict = {
pretty_data_name(datapath): get_dataset_ids(datapath)
for datapath in dataset_list
}
# check the record_ids_map contains all of the records in data1 and data2
rec_map_set = set(record_ids_map.keys())
for data_name, data_ids in data_dict.items():
# checks that data_ids are subset of rec_map_set
assert data_ids <= rec_map_set, \
f"{data_name} ids not in record_ids_map:\n {data_ids.difference(rec_map_set)}"
# delete to save memory
del rec_map_set
data_keyid_lists = dict()
for data_name, rec_ids in data_dict.items():
data_keyid_lists[data_name] = [
record_ids_map[rec_id][overlap_key] for rec_id in rec_ids
]
data_keyid_sets = {
data_name: set(key_ids)
for data_name, key_ids in data_keyid_lists.items()
}
data_keyid_counters ={
data_name: Counter(key_ids)
for data_name, key_ids in data_keyid_lists.items()
}
# reference dataset to be analyzed
unq_output = dict()
for ref_name, ref_set in data_keyid_sets.items():
        # each overlap dataset is checked for intersection with the reference dataset
print(f"Reference dataset: {ref_name}")
unq_output[ref_name] = dict()
for overlap_name, overlap_set in data_keyid_sets.items():
print(f"\tOverlap dataset: {overlap_name}")
count_unq_intersect = len(ref_set.intersection(overlap_set))
            perc_unq_intersect = round(count_unq_intersect/len(ref_set), 3)
            print(f"\t% of Reference intersecting Overlap: {perc_unq_intersect}\n")
            unq_output[ref_name][overlap_name] = perc_unq_intersect
print(f"Fully unique ouputs: \n{unq_output}\n")
print_symmetric_table(unq_output, "Intersect\\Reference", "Unique intersection")
# reference dataset to be analyzed
total_output = dict()
for ref_name, ref_counter in data_keyid_counters.items():
        # each overlap dataset is checked for intersection with the reference dataset
print(f"Reference dataset: {ref_name}")
total_output[ref_name] = dict()
for overlap_name, _ in data_keyid_counters.items():
print(f"\tOverlap dataset: {overlap_name}")
ref_set, overlap_set = data_keyid_sets[ref_name], data_keyid_sets[overlap_name]
intersect_ids = ref_set.intersection(overlap_set)
total_ref_records = len(data_dict[ref_name])
# count of intersecting records
count_tot_intersect = sum([
ref_counter[int_id] for int_id in intersect_ids
])
            perc_total_intersect = round(count_tot_intersect/total_ref_records, 3)
            print(f"\tRatio of total intersect to total records: {perc_total_intersect}\n")
            total_output[ref_name][overlap_name] = perc_total_intersect
print(f"Total output is:\n{total_output}\n")
print_symmetric_table(total_output, "Intersect\\Reference", "Total intersection")
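# Illustrative sketch (not part of the original pipeline): the two overlap metrics
# above, worked through on tiny hypothetical key lists so the difference between
# the "unique" and "total" ratios is visible.
def _demo_overlap_metrics():
    from collections import Counter
    ref_ids = ['s1', 's1', 's2', 's3']   # reference dataset overlap_keys (with repeats)
    ovl_ids = ['s2', 's4']               # overlap dataset overlap_keys
    ref_set, ovl_set = set(ref_ids), set(ovl_ids)
    intersect_ids = ref_set.intersection(ovl_set)
    ref_counter = Counter(ref_ids)
    # metric 1: unique intersecting keys / unique reference keys -> 1/3
    perc_unique = round(len(intersect_ids) / len(ref_set), 3)
    # metric 2: total intersecting records / total reference records -> 1/4
    perc_total = round(sum(ref_counter[i] for i in intersect_ids) / len(ref_ids), 3)
    print(perc_unique, perc_total)   # 0.333 0.25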
def update_unq_date_counter(counter:dict, name:str, constraint_id:str, date:str)->dict:
"""This function updates the unq_date_counter by incrementing the count for the constraint
in `name` for `date` if the constraint_id is not already in the `date` set.
Args:
counter (Dict[
name: Dict[
date: Dict[
"count": int,
"set": Set[constraint_id]
]
]
]):
dictionary with structure above. For each constraint_name and for each date-bucket (year-month),
it has a count of the unique occurances of the `constraint_id` as regulated by the Set of `ids`
name (str): name of the constraint e.g. "lesson", "line", or "speaker"
constraint_id (str): id for constraint specified by `name`
date (str): date string of the year and month in the YYYY-MM format e.g. "2019-08"
Returns:
(dict): updated counter dict
"""
# create a date entry if one doesn't exist
if date not in counter[name]:
counter[name][date] = dict()
# create the id-set for the given `date` if it doesn't exist
if "set" not in counter[name][date]:
counter[name][date]["set"] = set()
# if the `constraint_id` is not in the set, increment the date count and add the id to the set
if constraint_id not in counter[name][date]["set"]:
counter[name][date]["count"] = counter[name][date].get("count", 0) + 1
counter[name][date]["set"].add(constraint_id)
return counter
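# Illustrative sketch (not part of the original pipeline): how the nested counter
# returned by `update_unq_date_counter` grows. The speaker ids and date below are
# hypothetical.
def _demo_update_unq_date_counter():
    counter = {'speaker': dict()}
    counter = update_unq_date_counter(counter, 'speaker', 'spk_001', '2020-09')
    counter = update_unq_date_counter(counter, 'speaker', 'spk_001', '2020-09')  # duplicate id, count unchanged
    counter = update_unq_date_counter(counter, 'speaker', 'spk_002', '2020-09')
    assert counter['speaker']['2020-09']['count'] == 2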
class DurationAssessor():
def __init__(self):
self.datasets = AllDatasets().dataset_list
def duration_report(self, save_path:str):
with open(save_path, 'w') as fid:
for dataset in self.datasets:
duration = dataset.get_duration()
name = str(type(dataset))
out_string = "{0}: {1}\n".format(name, duration)
fid.write(out_string)
class TatoebaAssessor():
def __init__(self):
self.dataset = TatoebaDataset()
def create_report(self):
raise NotImplementedError
def audio_by_speaker(self):
assess_dict = dict()
audio_files = self.dataset.get_audio_files()
def test():
pass
"""
# steps
# 1. join eng_sent and audio_sent on 'id' key
# 2. fitler joined array by `lang`=='eng' to get all English sent with audio
# 3. do further filtering based on rating and lang ability
"""
eng_sent_df = | pd.read_csv(eng_sent_path, sep='\t', header=None, names=['id', 'lang', 'text']) | pandas.read_csv |
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
import plotly as pl
import re
import requests
from .DataFrameUtil import DataFrameUtil as dfUtil
class CreateDataFrame():
"""Classe de serviços para a criação de dataframes utilizados para a construção dos gráficos"""
def __init__(self):
self.dfTimeSeriesCases = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
self.dfTimeSeriesRecover = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
self.dfTimeSeriesDeath = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
url = 'https://covid19.who.int/WHO-COVID-19-global-table-data.csv'
self.dfRegioes = pd.read_csv(url)
def DataFrameMensal():
pd.options.display.float_format = '{:.0f}'.format # Sem Virgula
        # Building the dataframes
        # Collecting data from CSV files made available online.
url = 'https://covid19.who.int/WHO-COVID-19-global-table-data.csv'
dfRegioes = pd.read_csv(url)
dfTimeSeriesCases = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
dfTimeSeriesRecover = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
dfTimeSeriesDeath = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
        # Collecting data via web scraping
html_source = requests.get("https://www.worldometers.info/coronavirus/").text
html_source = re.sub(r'<.*?>', lambda g: g.group(0).upper(), html_source)
table_MN2 = pd.read_html(html_source)
dfWorldMeters = table_MN2[0]
dfWorldMeters.columns = [column.replace(" ", "_").replace(",", "_").replace("-","").replace("__","_") for column in dfWorldMeters.columns]
# Renomeando colunas, padronização
dfTimeSeriesCases.rename(columns={'Country/Region':'Name'}, inplace=True)
dfTimeSeriesRecover.rename(columns={'Country/Region':'Name'}, inplace=True)
dfTimeSeriesDeath.rename(columns={'Country/Region':'Name'}, inplace=True)
# Normalização de nome de países
dfTimeSeriesCases.loc[249,'Name'] = "United States of America"
dfTimeSeriesRecover.loc[249,'Name'] = "United States of America"
dfTimeSeriesDeath.loc[249,'Name'] = "United States of America"
dfWorldMeters.loc[8, 'Country_Other']= "United States of America"
dfWorldMeters.loc[13, 'Country_Other']= "United Kingdom"
dfRegioes.loc[6, 'Name'] ="United Kingdom"
# Filtrando Dataframes
dfRegioes.columns =[column.replace(" ", "_").replace("-","") for column in dfRegioes.columns]
dfRegioes.query('Name != "Global" and Name != "World" and Cases__cumulative_total > 0 and WHO_Region != "NaN"', inplace=True)
dfWorldMeters.query('Country_Other != "Total: " and Country_Other != "World" and ' +
' Country_Other != "North America" and Country_Other != "South America" and Country_Other != "Asia" and Country_Other != "Europe" ' +
'and Country_Other != "Africa" and Country_Other != "Oceania" and Country_Other != "Total:" and Country_Other != "NaN" and Population != "nan" and Population != "NaN"', inplace=True)
# Ordenando Dataframes
dfRegioes.sort_values(['Name'], inplace=True)
dfWorldMeters.sort_values(['Country_Other'], inplace=True)
# Criando novos dataframes manipulados
selected_columns = dfRegioes[["Name", "WHO_Region"]]
dfRegioesNew = selected_columns.copy()
dfRegioesNew.sort_values(['Name'], inplace=True)
listMonth = ['Jan', 'Fev', 'Mar', 'Abr','Mai','Jun',
'Jul', 'Ago','Set','Out','Nov', 'Dez',
'Jan 21', 'Fev 21', 'Mar 21', 'Abr 21']
dfTimeSeriesCases.drop(['Province/State', 'Lat','Long'], axis=1,inplace=True)
dfTimeSeriesRecover.drop(['Province/State', 'Lat','Long'], axis=1,inplace=True)
dfTimeSeriesDeath.drop(['Province/State', 'Lat','Long'], axis=1,inplace=True)
selected_columns = dfTimeSeriesCases[dfUtil.SelectColumnsMensal()]
dfTimeSeriesCases = selected_columns.copy()
selected_columns = dfTimeSeriesRecover[dfUtil.SelectColumnsMensal()]
dfTimeSeriesRecover = selected_columns.copy()
selected_columns = dfTimeSeriesDeath[dfUtil.SelectColumnsMensal()]
dfTimeSeriesDeath = selected_columns.copy()
selected_columns = dfWorldMeters[["Country_Other", "Population"]]
dfWorldMetersNew = selected_columns.copy()
dfWorldMetersNew.sort_values(['Country_Other'], inplace=True)
dfTimeSeriesCases = dfUtil.RenameColsMesAno(dfTimeSeriesCases)
dfTimeSeriesRecover = dfUtil.RenameColsMesAno(dfTimeSeriesRecover)
dfTimeSeriesDeath = dfUtil.RenameColsMesAno(dfTimeSeriesDeath)
# Renomeando colunas, padronização para merge final dos dataframes
dfRegioesNew.rename(columns={'WHO_Region':'Regiao'}, inplace=True)
dfWorldMetersNew.rename(columns={'Country_Other': 'Name'}, inplace=True)
dfWorldMetersNew.rename(columns={'Population': 'Populacao'}, inplace=True)
dfAux = dfTimeSeriesCases
mapping = dfUtil.CreateMappingMensal(dfTimeSeriesCases)
dfTimeSeriesCases = dfAux.rename(columns=mapping)
dfAux = dfTimeSeriesRecover
mapping = dfUtil.CreateMappingMensal(dfTimeSeriesRecover)
dfTimeSeriesRecover = dfAux.rename(columns=mapping)
dfAux = dfTimeSeriesDeath
mapping = dfUtil.CreateMappingMensal(dfTimeSeriesDeath)
dfTimeSeriesDeath = dfAux.rename(columns=mapping)
#Somando resultados montados através das linhas do Dataframe
dfTimeSeriesCasesSomado = dfUtil.SumRows(dfTimeSeriesCases)
dfTimeSeriesRecoverSomado = dfUtil.SumRows(dfTimeSeriesRecover)
dfTimeSeriesDeathSomado = dfUtil.SumRows(dfTimeSeriesDeath)
# Resetando index dos dataframes
dfRegioesNew.reset_index(drop=True)
dfWorldMetersNew.reset_index(drop=True)
dfTimeSeriesCasesSomado.reset_index(drop=True)
dfTimeSeriesRecoverSomado.reset_index(drop=True)
dfTimeSeriesDeathSomado.reset_index(drop=True)
dfTimeSeriesCasesSomado.sort_values(['Name'], inplace=True)
dfTimeSeriesRecoverSomado.sort_values(['Name'], inplace=True)
dfTimeSeriesDeathSomado.sort_values(['Name'], inplace=True)
dfRegioesNew.sort_values(['Name'], inplace=True)
dfWorldMetersNew.sort_values(['Name'], inplace=True)
# Merge dataframe
dfFinalCases = pd.merge(dfTimeSeriesCasesSomado, dfRegioesNew, on="Name")
dfFinalCases.rename(columns={'WHO_Region': 'Regiao'}, inplace=True)
dfFinalRecover = pd.merge(dfTimeSeriesRecoverSomado, dfRegioesNew, on="Name")
dfFinalRecover.rename(columns={'WHO_Region': 'Regiao'}, inplace=True)
dfFinalDeath = pd.merge(dfTimeSeriesDeathSomado, dfRegioesNew, on="Name")
dfFinalDeath.rename(columns={'WHO_Region': 'Regiao'}, inplace=True)
#MONTANDO NOVO DATAFRAME 2
d = {'Name': [] ,'Mes': [] ,'Recuperado': []}
DataFrameRecover = | pd.DataFrame(data=d) | pandas.DataFrame |
import numpy as np
import pandas as pd
import collections
from scipy.sparse import issparse
def balanced_newick_tree(num_taxa):
if num_taxa%2 != 0:
raise ValueError("There is no balanced tree on {num_taxa} taxa. Please specify an even number.")
from math import floor
def _balanced_newick_subtree(nt, left=False):
if nt == 2:
return "(_,_)"
elif nt==3:
return "((_,_),_)" if left else "(_,(_,_))"
else:
if nt%2==0:
return f"({_balanced_newick_subtree(nt/2, True)},{_balanced_newick_subtree(nt/2)})"
else:
return f"({_balanced_newick_subtree(floor(nt/2) + int(left), True)},{_balanced_newick_subtree(floor(nt/2) + int(not left))})"
newick_string = f"({_balanced_newick_subtree(num_taxa/2, True)},{_balanced_newick_subtree(num_taxa/2)});"
for i in range(0, num_taxa):
newick_string = newick_string.replace('_', str(np.base_repr(i, base=i+3)), 1)
return newick_string
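# Illustrative sketch: a minimal usage example of `balanced_newick_tree`.
# For 4 taxa the balanced topology pairs taxa (0,1) against (2,3).
def _demo_balanced_newick_tree():
    assert balanced_newick_tree(4) == "((0,1),(2,3));"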
def get_balance(s, asTuple=False):
"""Returns a string formatted 'X|X' which describes the balance of a given split string"""
s = s.split("|")
if not asTuple:
return str(len(s[0])) + '|' + str(len(s[1]))
else:
return (len(s[0]), len(s[1]))
def is_sparse(matrix):
return issparse(matrix)
def frob_norm(matrix, data_table=None):
"""Calculates the Frobenius Norm for a given matrix"""
if data_table is not None:
return sum(val**2 for _, val in data_table.itertuples(index=False))**(1/2)
if is_sparse(matrix):
return sum(matrix[i,j]**2 for i, j in zip(*matrix.nonzero()))**(1/2)
else:
return np.sqrt(sum(val**2 for val in np.nditer(matrix)))
def make_substitution_matrix(subs_prob, k):
matrix = []
for i in range(k):
matrix.append([1-subs_prob if j==i else subs_prob/(k-1) for j in range(k)])
return matrix
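# Illustrative sketch: `make_substitution_matrix` with k=4 states and a 0.3
# substitution probability gives a Jukes-Cantor-like matrix whose rows sum to 1
# (comparisons use tolerances because of floating-point arithmetic).
def _demo_make_substitution_matrix():
    m = make_substitution_matrix(0.3, 4)
    assert abs(m[0][0] - 0.7) < 1e-9 and abs(m[0][1] - 0.1) < 1e-9
    assert all(abs(sum(row) - 1.0) < 1e-9 for row in m)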
def all_splits(num_taxa, trivial=False, only_balance=None, randomise=False):
"""Generates all splits as string-representations
Args:
num_taxa: The number of taxa on the tree.
trivial: Whether or not to calculate trivial splits, default True.
only_trivial: Whether to ONLY create trivial splits, default False.
Returns:
A list of string-representations of splits (using '|'-notation)
"""
k = only_balance
n = num_taxa
taxa_string = "".join(np.base_repr(i, base=n) for i in range(n))
r = 0 if trivial else 1
loop_over = range(r, 2**(n-1) - r)
if randomise:
import random
loop_over = [i for i in loop_over]
random.shuffle(loop_over)
for i in loop_over:
template = format(i, f'0{n}b')
if not only_balance or sum(int(b) for b in template) in [only_balance, n-only_balance]:
if r < sum(int(b) for b in template) < n-r:
left = ""
right = ""
for t, b in zip(taxa_string, template):
if b == '0':
left += t
else:
right += t
if '0' in right:
left, right = right, left
yield f'{left}|{right}'
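# Illustrative sketch: enumerating the non-trivial splits of a 4-taxon set with
# the generator above (taxa are labelled 0..3).
def _demo_all_splits():
    splits = list(all_splits(4))
    assert splits == ['01|23', '02|13', '03|12']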
###
# Functions for reading in a sequence alignment
def read_alignment_from_file(pathToFile, check=True):
file = open(pathToFile, 'r')
alignment = collections.OrderedDict() # Keeps insertion order
currentSeq = ""
for line in file:
if '>' in line:
currentSeq = line.replace('>', '').replace('\n', '')
else:
if currentSeq in alignment:
alignment[currentSeq] += line.replace('\n', '')
else:
alignment[currentSeq] = line.replace('\n', '')
if check:
return alignment if __valid_alignment(alignment) else None
else:
return alignment
def __valid_alignment(alignmentDict):
ordered = isinstance(alignmentDict, collections.OrderedDict) # Must be ordered to avoid incorrect patterns
sameLength = len(set(len(value) for key, value in alignmentDict.items())) == 1 # Must be same length
return sameLength and ordered
def get_pattern_counts(alignment, asNumpyArray=False):
patterns = {}
sequences = list(alignment.values())
sequenceLength = len(sequences[0])
usableSequenceLength = 0
for i in range(sequenceLength):
patternAtSitei = ''.join(s[i] for s in sequences).upper()
if all(c in ('A', 'C', 'G', 'T') for c in patternAtSitei): # Need to handle U (let U = T)
usableSequenceLength += 1
if patternAtSitei in patterns:
patterns[patternAtSitei] += 1
else:
patterns[patternAtSitei] = 1
if asNumpyArray:
patterns = np.array([[key, val] for key, val in patterns.items()])
return patterns, usableSequenceLength
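# Illustrative sketch: site-pattern counting on a tiny hand-made alignment
# (ordinarily the OrderedDict comes from read_alignment_from_file).
def _demo_get_pattern_counts():
    toy = collections.OrderedDict([('taxon1', 'ACGT'), ('taxon2', 'ACGA')])
    patterns, usable_length = get_pattern_counts(toy)
    assert usable_length == 4
    assert patterns == {'AA': 1, 'CC': 1, 'GG': 1, 'TA': 1}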
def pattern_counts_to_probs(patterns, seqLen):
newCounts = [float(float(count) / seqLen) for count in patterns[:, 1]]
patternList = patterns[:, 0]
return np.array([patternList, newCounts]).transpose()
def pattern_probs_from_alignment(pathToFile, check=True):
alignment = read_alignment_from_file(pathToFile, check)
counts, sequenceLength = get_pattern_counts(alignment, True)
probs = pattern_counts_to_probs(counts, sequenceLength)
probs = | pd.DataFrame(probs, index=probs[:, 0]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import sys, os
import datetime, time
from math import ceil, floor # ceil : 소수점 이하를 올림, floor : 소수점 이하를 버림
import math
import pickle
import uuid
import base64
import subprocess
from subprocess import Popen
import PyQt5
from PyQt5 import QtCore, QtGui, uic
from PyQt5 import QAxContainer
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import (QApplication, QLabel, QLineEdit, QMainWindow, QDialog, QMessageBox, QProgressBar)
from PyQt5.QtWidgets import *
from PyQt5.QAxContainer import *
import numpy as np
from numpy import NaN, Inf, arange, isscalar, asarray, array
import pandas as pd
import pandas.io.sql as pdsql
from pandas import DataFrame, Series
# Google SpreadSheet Read/Write
import gspread # (추가 설치 모듈)
from oauth2client.service_account import ServiceAccountCredentials # (추가 설치 모듈)
from df2gspread import df2gspread as d2g # (추가 설치 모듈)
from string import ascii_uppercase # 알파벳 리스트
from bs4 import BeautifulSoup
import requests
import logging
import logging.handlers
import sqlite3
import telepot # 텔레그램봇(추가 설치 모듈)
from slacker import Slacker # 슬랙봇(추가 설치 모듈)
import csv
import FinanceDataReader as fdr
# Google Spreadsheet Setting *******************************
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
json_file_name = './secret/xtrader-276902-f5a8b77e2735.json'
credentials = ServiceAccountCredentials.from_json_keyfile_name(json_file_name, scope)
gc = gspread.authorize(credentials)
# XTrader-Stocklist URL
# spreadsheet_url = 'https://docs.google.com/spreadsheets/d/1pLi849EDnjZnaYhphkLButple5bjl33TKZrCoMrim3k/edit#gid=0' # Test Sheet
spreadsheet_url = 'https://docs.google.com/spreadsheets/d/1XE4sk0vDw4fE88bYMDZuJbnP4AF9CmRYHKY6fCXABw4/edit#gid=0' # Sheeet
testsheet_url = 'https://docs.google.com/spreadsheets/d/1pLi849EDnjZnaYhphkLButple5bjl33TKZrCoMrim3k/edit#gid=0'
# spreadsheet 연결 및 worksheet setting
doc = gc.open_by_url(spreadsheet_url)
doc_test = gc.open_by_url(testsheet_url)
shortterm_buy_sheet = doc.worksheet('매수모니터링')
shortterm_sell_sheet = doc.worksheet('매도모니터링')
shortterm_strategy_sheet = doc.worksheet('ST bot')
shortterm_history_sheet = doc.worksheet('매매이력')
condition_history_sheet = doc_test.worksheet('조건식이력')
price_monitoring_sheet = doc_test.worksheet('주가모니터링')
shortterm_history_cols = ['번호', '종목명', '매수가', '매수수량', '매수일', '매수전략', '매수조건', '매도가', '매도수량',
'매도일', '매도전략', '매도구간', '수익률(계산)','수익률', '수익금', '세금+수수료', '확정 수익금']
shortterm_analysis_cols = ['번호', '종목명', '우선순위', '일봉1', '일봉2', '일봉3', '일봉4', '주봉1', '월봉1', '거래량', '기관수급', '외인수급', '개인']
condition_history_cols = ['종목명', '매수가', '매수일','매도가', '매도일', '수익률(계산)', '수익률', '수익금', '세금+수수료']
# 구글 스프레드시트 업데이트를 위한 알파벳리스트(열 이름 얻기위함)
alpha_list = list(ascii_uppercase)
# SQLITE DB Setting *****************************************
DATABASE = 'stockdata.db'
def sqliteconn():
conn = sqlite3.connect(DATABASE)
return conn
# DB에서 종목명으로 종목코드, 종목영, 시장구분 반환
def get_code(종목명체크):
# 종목명이 띄워쓰기, 대소문자 구분이 잘못될 것을 감안해서
# DB 저장 시 종목명체크 컬럼은 띄워쓰기 삭제 및 소문자로 저장됨
# 구글에서 받은 종목명을 띄워쓰기 삭제 및 소문자로 바꿔서 종목명체크와 일치하는 데이터 저장
# 종목명은 DB에 있는 정상 종목명으로 사용하도록 리턴
종목명체크 = 종목명체크.lower().replace(' ', '')
query = """
select 종목코드, 종목명, 시장구분
from 종목코드
where (종목명체크 = '%s')
""" % (종목명체크)
conn = sqliteconn()
df = pd.read_sql(query, con=conn)
conn.close()
return list(df[['종목코드', '종목명', '시장구분']].values)[0]
# 종목코드가 int형일 경우 정상적으로 반환
def fix_stockcode(data):
if len(data)< 6:
for i in range(6 - len(data)):
data = '0'+data
return data
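# Illustrative sketch: fix_stockcode restores the leading zeros that are lost
# when a KRX code such as '005930' is read back as an integer.
def _demo_fix_stockcode():
    assert fix_stockcode('5930') == '005930'
    assert fix_stockcode('005930') == '005930'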
# 구글 스프레드 시트 Import후 DataFrame 반환
def import_googlesheet():
try:
# 1. 매수 모니터링 시트 체크 및 매수 종목 선정
row_data = shortterm_buy_sheet.get_all_values() # 구글 스프레드시트 '매수모니터링' 시트 데이터 get
# 작성 오류 체크를 위한 주요 항목의 위치(index)를 저장
idx_strategy = row_data[0].index('기본매도전략')
idx_buyprice = row_data[0].index('매수가1')
idx_sellprice = row_data[0].index('목표가')
# DB에서 받아올 종목코드와 시장 컬럼 추가
# 번호, 종목명, 매수모니터링, 비중, 시가위치, 매수가1, 매수가2, 매수가3, 기존매도전략, 목표가
row_data[0].insert(2, '종목코드')
row_data[0].insert(3, '시장')
for row in row_data[1:]:
try:
code, name, market = get_code(row[1]) # 종목명으로 종목코드, 종목명, 시장 받아서(get_code 함수) 추가
except Exception as e:
name = ''
code = ''
market = ''
print('구글 매수모니터링 시트 종목명 오류 : %s' % (row[1]))
logger.error('구글 매수모니터링 시트 오류 : %s' % (row[1]))
Telegram('[XTrader]구글 매수모니터링 시트 오류 : %s' % (row[1]))
row[1] = name # 정상 종목명으로 저장
row.insert(2, code)
row.insert(3, market)
data = pd.DataFrame(data=row_data[1:], columns=row_data[0])
# 사전 데이터 정리
data = data[(data['매수모니터링'] == '1') & (data['종목코드']!= '')]
data = data[row_data[0][:row_data[0].index('목표가')+1]]
del data['매수모니터링']
data.to_csv('%s_googlesheetdata.csv'%(datetime.date.today().strftime('%Y%m%d')), encoding='euc-kr', index=False)
# 2. 매도 모니터링 시트 체크(번호, 종목명, 보유일, 매도전략, 매도가)
row_data = shortterm_sell_sheet.get_all_values() # 구글 스프레드시트 '매도모니터링' 시트 데이터 get
# 작성 오류 체크를 위한 주요 항목의 위치(index)를 저장
idx_holding = row_data[0].index('보유일')
idx_strategy = row_data[0].index('매도전략')
idx_loss = row_data[0].index('손절가')
idx_sellprice = row_data[0].index('목표가')
if len(row_data) > 1:
for row in row_data[1:]:
try:
code, name, market = get_code(row[1]) # 종목명으로 종목코드, 종목명, 시장 받아서(get_code 함수) 추가
if row[idx_holding] == '' : raise Exception('보유일 오류')
if row[idx_strategy] == '': raise Exception('매도전략 오류')
if row[idx_loss] == '': raise Exception('손절가 오류')
if row[idx_strategy] == '4' and row[idx_sellprice] == '': raise Exception('목표가 오류')
except Exception as e:
if str(e) != '보유일 오류' and str(e) != '매도전략 오류' and str(e) != '손절가 오류'and str(e) != '목표가 오류': e = '종목명 오류'
print('구글 매도모니터링 시트 오류 : %s, %s' % (row[1], e))
logger.error('구글 매도모니터링 시트 오류 : %s, %s' % (row[1], e))
Telegram('[XTrader]구글 매도모니터링 시트 오류 : %s, %s' % (row[1], e))
# print(data)
print('[XTrader]구글 시트 확인 완료')
# Telegram('[XTrader]구글 시트 확인 완료')
# logger.info('[XTrader]구글 시트 확인 완료')
return data
except Exception as e:
# 구글 시트 import error시 에러 없어을 때 백업한 csv 읽어옴
print("import_googlesheet Error : %s"%e)
logger.error("import_googlesheet Error : %s"%e)
backup_file = datetime.date.today().strftime('%Y%m%d') + '_googlesheetdata.csv'
if backup_file in os.listdir():
data = pd.read_csv(backup_file, encoding='euc-kr')
data = data.fillna('')
data = data.astype(str)
data['종목코드'] = data['종목코드'].apply(fix_stockcode)
print("import googlesheet backup_file")
logger.info("import googlesheet backup_file")
return data
# Telegram Setting *****************************************
with open('./secret/telegram_token.txt', mode='r') as tokenfile:
TELEGRAM_TOKEN = tokenfile.readline().strip()
with open('./secret/chatid.txt', mode='r') as chatfile:
CHAT_ID = int(chatfile.readline().strip())
bot = telepot.Bot(TELEGRAM_TOKEN)
with open('./secret/Telegram.txt', mode='r') as tokenfile:
r = tokenfile.read()
TELEGRAM_TOKEN_yoo = r.split('\n')[0].split(', ')[1]
CHAT_ID_yoo = r.split('\n')[1].split(', ')[1]
bot_yoo = telepot.Bot(TELEGRAM_TOKEN_yoo)
telegram_enable = True
def Telegram(str, send='all'):
try:
if telegram_enable == True:
# if send == 'mc':
# bot.sendMessage(CHAT_ID, str)
# else:
# bot.sendMessage(CHAT_ID, str)
# bot_yoo.sendMessage(CHAT_ID_yoo, str)
bot.sendMessage(CHAT_ID, str)
else:
pass
except Exception as e:
Telegram('[StockTrader]Telegram Error : %s' % e, send='mc')
# Slack Setting ***********************************************
# with open('./secret/slack_token.txt', mode='r') as tokenfile:
# SLACK_TOKEN = tokenfile.readline().strip()
# slack = Slacker(SLACK_TOKEN)
# slack_enable = False
# def Slack(str):
# if slack_enable == True:
# slack.chat.post_message('#log', str)
# else:
# pass
# 매수 후 보유기간 계산 *****************************************
today = datetime.date.today()
def holdingcal(base_date, excluded=(6, 7)): # 예시 base_date = '2018-06-23'
yy = int(base_date[:4]) # 연도
mm = int(base_date[5:7]) # 월
dd = int(base_date[8:10]) # 일
base_d = datetime.date(yy, mm, dd)
delta = 0
while base_d <= today:
if base_d.isoweekday() not in excluded:
delta += 1
base_d += datetime.timedelta(days=1)
return delta # 당일도 1일로 계산됨
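# Illustrative sketch: holdingcal counts weekdays from the buy date through
# today inclusive, so a position opened today returns 1 on a weekday.
def _demo_holdingcal():
    buy_date = today.strftime('%Y-%m-%d')
    if today.isoweekday() not in (6, 7):
        assert holdingcal(buy_date) == 1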
# 호가 계산(상한가, 현재가) *************************************
def hogacal(price, diff, market, option):
# diff 0 : 상한가 호가, -1 : 상한가 -1호가
if option == '현재가':
cal_price = price
elif option == '상한가':
cal_price = price * 1.3
if cal_price < 1000:
hogaunit = 1
elif cal_price < 5000:
hogaunit = 5
elif cal_price < 10000:
hogaunit = 10
elif cal_price < 50000:
hogaunit = 50
elif cal_price < 100000 and market == "KOSPI":
hogaunit = 100
elif cal_price < 500000 and market == "KOSPI":
hogaunit = 500
elif cal_price >= 500000 and market == "KOSPI":
hogaunit = 1000
elif cal_price >= 50000 and market == "KOSDAQ":
hogaunit = 100
cal_price = int(cal_price / hogaunit) * hogaunit + (hogaunit * diff)
return cal_price
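# Illustrative sketch: tick-size handling in hogacal for a hypothetical KOSPI
# stock priced at 50,000. The limit-up price 50,000 * 1.3 = 65,000 falls in the
# 100-won tick band, and diff=-1 steps one tick below it.
def _demo_hogacal():
    assert hogacal(50000, 0, 'KOSPI', '상한가') == 65000
    assert hogacal(50000, -1, 'KOSPI', '상한가') == 64900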
# 종목별 현재가 크롤링 ******************************************
def crawler_price(code):
code = code[1:]
url = 'https://finance.naver.com/item/sise.nhn?code=%s' % (code)
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
tag = soup.find("td", {"class": "num"})
return int(tag.text.replace(',',''))
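# Illustrative sketch (network call, layout-dependent): crawler_price strips the
# first character of the code, so a prefixed code such as 'A005930' is expected.
# The result depends on the current Naver Finance page structure.
def _demo_crawler_price():
    price = crawler_price('A005930')
    print('005930 current price:', price)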
로봇거래계좌번호 = None
주문딜레이 = 0.25
초당횟수제한 = 5
## 키움증권 제약사항 - 3.7초에 한번 읽으면 지금까지는 괜찮음
주문지연 = 3700 # 3.7초
로봇스크린번호시작 = 9000
로봇스크린번호종료 = 9999
# Table View 데이터 정리
class PandasModel(QtCore.QAbstractTableModel):
def __init__(self, data=None, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self._data = data
if data is None:
self._data = DataFrame()
def rowCount(self, parent=None):
# return len(self._data.values)
return len(self._data.index)
def columnCount(self, parent=None):
return self._data.columns.size
def data(self, index, role=Qt.DisplayRole):
if index.isValid():
if role == Qt.DisplayRole:
# return QtCore.QVariant(str(self._data.values[index.row()][index.column()]))
return str(self._data.values[index.row()][index.column()])
# return QtCore.QVariant()
return None
def headerData(self, column, orientation, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return None
if orientation == Qt.Horizontal:
return self._data.columns[column]
return int(column + 1)
def update(self, data):
self._data = data
self.reset()
def reset(self):
self.beginResetModel()
# unnecessary call to actually clear data, but recommended by design guidance from Qt docs
# left blank in preliminary testing
self.endResetModel()
def flags(self, index):
return QtCore.Qt.ItemIsEnabled
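# Illustrative sketch: wiring PandasModel to a QTableView. This assumes a running
# QApplication (as created elsewhere in this program); the DataFrame contents are
# hypothetical.
def _demo_pandas_model():
    df = DataFrame({'종목명': ['삼성전자'], '현재가': [70000]})
    model = PandasModel(df)
    view = QTableView()
    view.setModel(model)
    view.show()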
# 포트폴리오에 사용되는 주식정보 클래스
# TradeShortTerm용 포트폴리오
class CPortStock_ShortTerm(object):
def __init__(self, 번호, 매수일, 종목코드, 종목명, 시장, 매수가, 매수조건, 보유일, 매도전략, 매도구간별조건, 매도구간=1, 매도가=0, 수량=0):
self.번호 = 번호
self.매수일 = 매수일
self.종목코드 = 종목코드
self.종목명 = 종목명
self.시장 = 시장
self.매수가 = 매수가
self.매수조건 = 매수조건
self.보유일 = 보유일
self.매도전략 = 매도전략
self.매도구간별조건 = 매도구간별조건
self.매도구간 = 매도구간
self.매도가 = 매도가
self.수량 = 수량
if self.매도전략 == '2' or self.매도전략 == '3':
self.목표도달 = False # 목표가(매도가) 도달 체크(False 상태로 구간 컷일경우 전량 매도)
self.매도조건 = '' # 구간매도 : B, 목표매도 : T
elif self.매도전략 == '4':
self.sellcount = 0
self.매도단위수량 = 0 # 전략4의 기본 매도 단위는 보유수량의 1/3
self.익절가1도달 = False
self.익절가2도달 = False
self.목표가도달 = False
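# Illustrative sketch: creating a short-term portfolio entry. All values below
# are hypothetical; with 매도전략 '2' the target-tracking fields are initialised.
def _demo_portstock_shortterm():
    stock = CPortStock_ShortTerm(
        번호=1, 매수일='2020-09-10 09:05:00', 종목코드='005930', 종목명='삼성전자',
        시장='KOSPI', 매수가=58000, 매수조건='1', 보유일=3, 매도전략='2',
        매도구간별조건=[-2.7, 0.3, -3.0, -4.0, -5.0, -7.0], 수량=10)
    assert stock.목표도달 is False and stock.매도조건 == ''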
# TradeLongTerm용 포트폴리오
class CPortStock_LongTerm(object):
def __init__(self, 매수일, 종목코드, 종목명, 시장, 매수가, 수량=0):
self.매수일 = 매수일
self.종목코드 = 종목코드
self.종목명 = 종목명
self.시장 = 시장
self.매수가 = 매수가
self.수량 = 수량
# 기본 로봇용 포트폴리오
class CPortStock(object):
def __init__(self, 매수일, 종목코드, 종목명, 시장, 매수가, 보유일, 매도전략, 매도구간=0, 매도전략변경1=False, 매도전략변경2=False, 수량=0):
self.매수일 = 매수일
self.종목코드 = 종목코드
self.종목명 = 종목명
self.시장 = 시장
self.매수가 = 매수가
self.보유일 = 보유일
self.매도전략 = 매도전략
self.매도구간 = 매도구간
self.매도전략변경1 = 매도전략변경1
self.매도전략변경2 = 매도전략변경2
self.수량 = 수량
# CTrade 거래로봇용 베이스클래스 : OpenAPI와 붙어서 주문을 내는 등을 하는 클래스
class CTrade(object):
def __init__(self, sName, UUID, kiwoom=None, parent=None):
"""
:param sName: 로봇이름
:param UUID: 로봇구분용 id
:param kiwoom: 키움OpenAPI
:param parent: 나를 부른 부모 - 보통은 메인윈도우
"""
# print("CTrade : __init__")
self.sName = sName
self.UUID = UUID
self.sAccount = None # 거래용계좌번호
self.kiwoom = kiwoom
self.parent = parent
self.running = False # 실행상태
self.portfolio = dict() # 포트폴리오 관리 {'종목코드':종목정보}
self.현재가 = dict() # 각 종목의 현재가
# 조건 검색식 종목 읽기
def GetCodes(self, Index, Name, Type):
logger.info("[%s]조건 검색식 종목 읽기"%(self.sName))
# self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition)
# self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer)
# self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition)
try:
self.getConditionLoad()
print('getload 완료')
print('조건 검색 :', Name, int(Index), Type)
codelist = self.sendCondition("0156", Name, int(Index), Type) # 선정된 검색조건식으로 바로 종목 검색
print('GetCodes :', self.codeList)
return self.codeList
except Exception as e:
print("GetCondition_Error")
print(e)
def getConditionLoad(self):
print('getConditionLoad')
self.kiwoom.dynamicCall("GetConditionLoad()")
# receiveConditionVer() 이벤트 메서드에서 루프 종료
self.ConditionLoop = QEventLoop()
self.ConditionLoop.exec_()
def getConditionNameList(self):
print('getConditionNameList')
data = self.kiwoom.dynamicCall("GetConditionNameList()")
conditionList = data.split(';')
del conditionList[-1]
conditionDictionary = {}
for condition in conditionList:
key, value = condition.split('^')
conditionDictionary[int(key)] = value
# print(conditionDictionary)
return conditionDictionary
# 조건식 조회
def sendCondition(self, screenNo, conditionName, conditionIndex, isRealTime):
print("CTrade : sendCondition", screenNo, conditionName, conditionIndex, isRealTime)
isRequest = self.kiwoom.dynamicCall("SendCondition(QString, QString, int, int)",
screenNo, conditionName, conditionIndex, isRealTime)
# receiveTrCondition() 이벤트 메서드에서 루프 종료
# 실시간 검색일 경우 Loop 미적용해서 바로 조회 등록이 되게 해야됨
# if self.조건검색타입 ==0:
self.ConditionLoop = QEventLoop()
self.ConditionLoop.exec_()
# 조건식 조회 중지
def sendConditionStop(self, screenNo, conditionName, conditionIndex):
# print("CTrade : sendConditionStop", screenNo, conditionName, conditionIndex)
isRequest = self.kiwoom.dynamicCall("SendConditionStop(QString, QString, int)",
screenNo, conditionName, conditionIndex)
# 계좌 보유 종목 받음
def InquiryList(self, _repeat=0):
# print("CTrade : InquiryList")
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.sAccount)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "비밀번호입력매체구분", '00')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "조회구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "계좌평가잔고내역요청", "opw00018", _repeat, '{:04d}'.format(self.sScreenNo))
self.InquiryLoop = QEventLoop() # 로봇에서 바로 쓸 수 있도록하기 위해서 계좌 조회해서 종목을 받고나서 루프해제시킴
self.InquiryLoop.exec_()
# 금일 매도 종목에 대해서 수익률, 수익금, 수수료 요청(일별종목별실현손익요청)
def DailyProfit(self, 금일매도종목):
_repeat = 0
# self.sAccount = 로봇거래계좌번호
# self.sScreenNo = self.ScreenNumber
시작일자 = datetime.date.today().strftime('%Y%m%d')
cnt = 1
for 종목코드 in 금일매도종목:
# print(self.sScreenNo, 종목코드, 시작일자)
self.update_cnt = len(금일매도종목) - cnt
cnt += 1
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.sAccount)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", 종목코드)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "시작일자", 시작일자)
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "일자별종목별실현손익요청", "OPT10072",
_repeat, '{:04d}'.format(self.sScreenNo))
self.DailyProfitLoop = QEventLoop() # 로봇에서 바로 쓸 수 있도록하기 위해서 계좌 조회해서 종목을 받고나서 루프해제시킴
self.DailyProfitLoop.exec_()
# 일별종목별실현손익 응답 결과 구글 업로드
def DailyProfitUpload(self, 매도결과):
# 매도결과 ['종목명','체결량','매입단가','체결가','당일매도손익','손익율','당일매매수수료','당일매매세금']
print(매도결과)
if self.sName == 'TradeShortTerm':
history_sheet = shortterm_history_sheet
history_cols = shortterm_history_cols
elif self.sName == 'TradeCondition':
history_sheet = condition_history_sheet
history_cols = condition_history_cols
try:
code_row = history_sheet.findall(매도결과[0])[-1].row
계산수익률 = round((int(float(매도결과[3])) / int(float(매도결과[2])) - 1) * 100, 2)
cell = alpha_list[history_cols.index('매수가')] + str(code_row) # 매입단가
history_sheet.update_acell(cell, int(float(매도결과[2])))
cell = alpha_list[history_cols.index('매도가')] + str(code_row) # 체결가
history_sheet.update_acell(cell, int(float(매도결과[3])))
cell = alpha_list[history_cols.index('수익률(계산)')] + str(code_row) # 수익률 계산
history_sheet.update_acell(cell, 계산수익률)
cell = alpha_list[history_cols.index('수익률')] + str(code_row) # 손익율
history_sheet.update_acell(cell, 매도결과[5])
cell = alpha_list[history_cols.index('수익금')] + str(code_row) # 손익율
history_sheet.update_acell(cell, int(float(매도결과[4])))
cell = alpha_list[history_cols.index('세금+수수료')] + str(code_row) # 당일매매수수료 + 당일매매세금
history_sheet.update_acell(cell, int(float(매도결과[6])) + int(float(매도결과[7])))
self.DailyProfitLoop.exit()
if self.update_cnt == 0:
print('금일 실현 손익 구글 업로드 완료')
Telegram("[StockTrader]금일 실현 손익 구글 업로드 완료")
logger.info("[StockTrader]금일 실현 손익 구글 업로드 완료")
except:
self.DailyProfitLoop.exit() # 강제 루프 해제
print('[StockTrader]CTrade:DailyProfitUpload_%s 매도 이력 없음' % 매도결과[0])
logger.error('CTrade:DailyProfitUpload_%s 매도 이력 없음' % 매도결과[0])
# 포트폴리오의 상태
def GetStatus(self):
# print("CTrade : GetStatus")
try:
result = []
for p, v in self.portfolio.items():
result.append('%s(%s)[P%s/V%s/D%s]' % (v.종목명.strip(), v.종목코드, v.매수가, v.수량, v.매수일))
return [self.__class__.__name__, self.sName, self.UUID, self.sScreenNo, self.running, len(self.portfolio), ','.join(result)]
except Exception as e:
print('CTrade_GetStatus Error', e)
logger.error('CTrade_GetStatus Error : %s' % e)
def GenScreenNO(self):
"""
:return: 키움증권에서 요구하는 스크린번호를 생성
"""
# print("CTrade : GenScreenNO")
self.SmallScreenNumber += 1
if self.SmallScreenNumber > 9999:
self.SmallScreenNumber = 0
return self.sScreenNo * 10000 + self.SmallScreenNumber
def GetLoginInfo(self, tag):
"""
:param tag:
:return: 로그인정보 호출
"""
# print("CTrade : GetLoginInfo")
return self.kiwoom.dynamicCall('GetLoginInfo("%s")' % tag)
def KiwoomConnect(self):
"""
:return: 키움증권OpenAPI의 CallBack에 대응하는 처리함수를 연결
"""
# print("CTrade : KiwoomConnect")
try:
self.kiwoom.OnEventConnect[int].connect(self.OnEventConnect)
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
self.kiwoom.OnReceiveChejanData[str, int, str].connect(self.OnReceiveChejanData)
self.kiwoom.OnReceiveRealData[str, str, str].connect(self.OnReceiveRealData)
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer)
self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition)
except Exception as e:
print("CTrade : [%s]KiwoomConnect Error :"&(self.sName, e))
# logger.info("%s : connected" % self.sName)
def KiwoomDisConnect(self):
"""
:return: Callback 연결해제
"""
# print("CTrade : KiwoomDisConnect")
try:
self.kiwoom.OnEventConnect[int].disconnect(self.OnEventConnect)
except Exception:
pass
try:
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
except Exception:
pass
try:
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition)
except Exception:
pass
try:
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
except Exception:
pass
try:
self.kiwoom.OnReceiveChejanData[str, int, str].disconnect(self.OnReceiveChejanData)
except Exception:
pass
try:
self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer)
except Exception:
pass
try:
self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition)
except Exception:
pass
try:
self.kiwoom.OnReceiveRealData[str, str, str].disconnect(self.OnReceiveRealData)
except Exception:
pass
# logger.info("%s : disconnected" % self.sName)
def KiwoomAccount(self):
"""
:return: 계좌정보를 읽어옴
"""
# print("CTrade : KiwoomAccount")
ACCOUNT_CNT = self.GetLoginInfo('ACCOUNT_CNT')
ACC_NO = self.GetLoginInfo('ACCNO')
self.account = ACC_NO.split(';')[0:-1]
self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.account[0])
self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "d+2예수금요청", "opw00001", 0, '{:04d}'.format(self.sScreenNo))
self.depositLoop = QEventLoop() # self.d2_deposit를 로봇에서 바로 쓸 수 있도록하기 위해서 예수금을 받고나서 루프해제시킴
self.depositLoop.exec_()
# logger.debug("보유 계좌수: %s 계좌번호: %s [%s]" % (ACCOUNT_CNT, self.account[0], ACC_NO))
def KiwoomSendOrder(self, sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo):
"""
OpenAPI 메뉴얼 참조
:param sRQName:
:param sScreenNo:
:param sAccNo:
:param nOrderType:
:param sCode:
:param nQty:
:param nPrice:
:param sHogaGb:
:param sOrgOrderNo:
:return:
"""
# print("CTrade : KiwoomSendOrder")
try:
order = self.kiwoom.dynamicCall(
'SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)',
[sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo])
return order
except Exception as e:
print('CTrade_KiwoomSendOrder Error ', e)
Telegram('[StockTrader]CTrade_KiwoomSendOrder Error: %s' % e, send='mc')
logger.error('CTrade_KiwoomSendOrder Error : %s' % e)
# -거래구분값 확인(2자리)
#
# 00 : 지정가
# 03 : 시장가
# 05 : 조건부지정가
# 06 : 최유리지정가
# 07 : 최우선지정가
# 10 : 지정가IOC
# 13 : 시장가IOC
# 16 : 최유리IOC
# 20 : 지정가FOK
# 23 : 시장가FOK
# 26 : 최유리FOK
# 61 : 장전 시간외단일가매매
# 81 : 장후 시간외종가
# 62 : 시간외단일가매매
#
# -매매구분값 (1 자리)
# 1 : 신규매수
# 2 : 신규매도
# 3 : 매수취소
# 4 : 매도취소
# 5 : 매수정정
# 6 : 매도정정
def KiwoomSetRealReg(self, sScreenNo, sCode, sRealType='0'):
"""
OpenAPI 메뉴얼 참조
:param sScreenNo:
:param sCode:
:param sRealType:
:return:
"""
# print("CTrade : KiwoomSetRealReg")
ret = self.kiwoom.dynamicCall('SetRealReg(QString, QString, QString, QString)', sScreenNo, sCode, '9001;10',
sRealType)
return ret
def KiwoomSetRealRemove(self, sScreenNo, sCode):
"""
OpenAPI 메뉴얼 참조
:param sScreenNo:
:param sCode:
:return:
"""
# print("CTrade : KiwoomSetRealRemove")
ret = self.kiwoom.dynamicCall('SetRealRemove(QString, QString)', sScreenNo, sCode)
return ret
def OnEventConnect(self, nErrCode):
"""
OpenAPI 메뉴얼 참조
:param nErrCode:
:return:
"""
# print("CTrade : OnEventConnect")
logger.debug('OnEventConnect', nErrCode)
def OnReceiveMsg(self, sScrNo, sRQName, sTRCode, sMsg):
"""
OpenAPI 메뉴얼 참조
:param sScrNo:
:param sRQName:
:param sTRCode:
:param sMsg:
:return:
"""
# print("CTrade : OnReceiveMsg")
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTRCode, sMsg))
# self.InquiryLoop.exit()
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
"""
OpenAPI 메뉴얼 참조
:param sScrNo:
:param sRQName:
:param sTRCode:
:param sRecordName:
:param sPreNext:
:param nDataLength:
:param sErrorCode:
:param sMessage:
:param sSPlmMsg:
:return:
"""
# print('CTrade : OnReceiveTrData')
try:
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo[:4]):
return
if 'B_' in sRQName or 'S_' in sRQName:
주문번호 = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, 0, "주문번호")
# logger.debug("화면번호: %s sRQName : %s 주문번호: %s" % (sScrNo, sRQName, 주문번호))
self.주문등록(sRQName, 주문번호)
if sRQName == "d+2예수금요청":
data = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)',sTRCode, "", sRQName, 0, "d+2추정예수금")
# 입력된 문자열에 대해 lstrip 메서드를 통해 문자열 왼쪽에 존재하는 '-' 또는 '0'을 제거. 그리고 format 함수를 통해 천의 자리마다 콤마를 추가한 문자열로 변경
strip_data = data.lstrip('-0')
if strip_data == '':
strip_data = '0'
format_data = format(int(strip_data), ',d')
if data.startswith('-'):
format_data = '-' + format_data
self.sAsset = format_data
self.depositLoop.exit() # self.d2_deposit를 로봇에서 바로 쓸 수 있도록하기 위해서 예수금을 받고나서 루프해제시킴
if sRQName == "계좌평가잔고내역요청":
print("계좌평가잔고내역요청_수신")
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
self.CList = []
for i in range(0, cnt):
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, '종목번호').strip().lstrip('0')
# print(S)
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
S = self.종목코드변환(S) # 종목코드 맨 첫 'A'를 삭제하기 위함
self.CList.append(S)
# logger.debug("%s" % row)
if sPreNext == '2':
self.remained_data = True
self.InquiryList(_repeat=2)
else:
self.remained_data = False
print(self.CList)
self.InquiryLoop.exit()
if sRQName == "일자별종목별실현손익요청":
try:
data_idx = ['종목명', '체결량', '매입단가', '체결가', '당일매도손익', '손익율', '당일매매수수료', '당일매매세금']
result = []
for idx in data_idx:
data = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode,
"",
sRQName, 0, idx)
result.append(data.strip())
self.DailyProfitUpload(result)
except Exception as e:
print(e)
logger.error('일자별종목별실현손익요청 Error : %s' % e)
except Exception as e:
print('CTrade_OnReceiveTrData Error ', e)
Telegram('[StockTrader]CTrade_OnReceiveTrData Error : %s' % e, send='mc')
logger.error('CTrade_OnReceiveTrData Error : %s' % e)
def OnReceiveChejanData(self, sGubun, nItemCnt, sFidList):
"""
OpenAPI 메뉴얼 참조
:param sGubun:
:param nItemCnt:
:param sFidList:
:return:
"""
# logger.debug('OnReceiveChejanData [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
# 주문체결시 순서
# 1 구분:0 GetChejanData(913) = '접수'
# 2 구분:0 GetChejanData(913) = '체결'
# 3 구분:1 잔고정보
"""
# sFid별 주요데이터는 다음과 같습니다.
# "9201" : "계좌번호"
# "9203" : "주문번호"
# "9001" : "종목코드"
# "913" : "주문상태"
# "302" : "종목명"
# "900" : "주문수량"
# "901" : "주문가격"
# "902" : "미체결수량"
# "903" : "체결누계금액"
# "904" : "원주문번호"
# "905" : "주문구분"
# "906" : "매매구분"
# "907" : "매도수구분"
# "908" : "주문/체결시간"
# "909" : "체결번호"
# "910" : "체결가"
# "911" : "체결량"
# "10" : "현재가"
# "27" : "(최우선)매도호가"
# "28" : "(최우선)매수호가"
# "914" : "단위체결가"
# "915" : "단위체결량"
# "919" : "거부사유"
# "920" : "화면번호"
# "917" : "신용구분"
# "916" : "대출일"
# "930" : "보유수량"
# "931" : "매입단가"
# "932" : "총매입가"
# "933" : "주문가능수량"
# "945" : "당일순매수수량"
# "946" : "매도/매수구분"
# "950" : "당일총매도손일"
# "951" : "예수금"
# "307" : "기준가"
# "8019" : "손익율"
# "957" : "신용금액"
# "958" : "신용이자"
# "918" : "만기일"
# "990" : "당일실현손익(유가)"
# "991" : "당일실현손익률(유가)"
# "992" : "당일실현손익(신용)"
# "993" : "당일실현손익률(신용)"
# "397" : "파생상품거래단위"
# "305" : "상한가"
# "306" : "하한가"
"""
# print("CTrade : OnReceiveChejanData")
try:
# 접수
if sGubun == "0":
# logger.debug('OnReceiveChejanData: 접수 [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
화면번호 = self.kiwoom.dynamicCall('GetChejanData(QString)', 920)
if len(화면번호.replace(' ','')) == 0 : # 로봇 실행중 영웅문으로 주문 발생 시 화면번호가 ' '로 들어와 에러발생함 방지
print('다른 프로그램을 통한 거래 발생')
Telegram('다른 프로그램을 통한 거래 발생', send='mc')
logger.info('다른 프로그램을 통한 거래 발생')
return
elif self.sScreenNo != int(화면번호[:4]):
return
param = dict()
param['sGubun'] = sGubun
param['계좌번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 9201)
param['주문번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 9203)
param['종목코드'] = self.종목코드변환(self.kiwoom.dynamicCall('GetChejanData(QString)', 9001))
param['주문업무분류'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 912)
# 접수 / 체결 확인
# 주문상태(10:원주문, 11:정정주문, 12:취소주문, 20:주문확인, 21:정정확인, 22:취소확인, 90-92:주문거부)
param['주문상태'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 913) # 접수 or 체결 확인
param['종목명'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 302).strip()
param['주문수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 900)
param['주문가격'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 901)
param['미체결수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 902)
param['체결누계금액'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 903)
param['원주문번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 904)
param['주문구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 905)
param['매매구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 906)
param['매도수구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 907)
param['체결시간'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 908)
param['체결번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 909)
param['체결가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 910)
param['체결량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 911)
param['현재가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 10)
param['매도호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 27)
param['매수호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 28)
param['단위체결가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 914).strip()
param['단위체결량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 915)
param['화면번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 920)
param['당일매매수수료'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 938)
param['당일매매세금'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 939)
param['체결수량'] = int(param['주문수량']) - int(param['미체결수량'])
logger.debug('접수 - 주문상태:{주문상태} 계좌번호:{계좌번호} 체결시간:{체결시간} 주문번호:{주문번호} 체결번호:{체결번호} 종목코드:{종목코드} 종목명:{종목명} 체결량:{체결량} 체결가:{체결가} 단위체결가:{단위체결가} 주문수량:{주문수량} 체결수량:{체결수량} 단위체결량:{단위체결량} 미체결수량:{미체결수량} 당일매매수수료:{당일매매수수료} 당일매매세금:{당일매매세금}'.format(**param))
# if param["주문상태"] == "접수":
# self.접수처리(param)
# if param["주문상태"] == "체결": # 매도의 경우 체결로 안들어옴
# self.체결처리(param)
self.체결처리(param)
# 잔고통보
if sGubun == "1":
# logger.debug('OnReceiveChejanData: 잔고통보 [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
param = dict()
param['sGubun'] = sGubun
param['계좌번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 9201)
param['종목코드'] = self.종목코드변환(self.kiwoom.dynamicCall('GetChejanData(QString)', 9001))
param['신용구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 917)
param['대출일'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 916)
param['종목명'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 302).strip()
param['현재가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 10)
param['보유수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 930)
param['매입단가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 931)
param['총매입가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 932)
param['주문가능수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 933)
param['당일순매수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 945)
param['매도매수구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 946)
param['당일총매도손익'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 950)
param['예수금'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 951)
param['매도호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 27)
param['매수호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 28)
param['기준가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 307)
param['손익율'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 8019)
param['신용금액'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 957)
param['신용이자'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 958)
param['만기일'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 918)
param['당일실현손익_유가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 990)
param['당일실현손익률_유가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 991)
param['당일실현손익_신용'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 992)
param['당일실현손익률_신용'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 993)
param['담보대출수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 959)
logger.debug('잔고통보 - 계좌번호:{계좌번호} 종목명:{종목명} 보유수량:{보유수량} 매입단가:{매입단가} 총매입가:{총매입가} 손익율:{손익율} 당일총매도손익:{당일총매도손익} 당일순매수량:{당일순매수량}'.format(**param))
self.잔고처리(param)
# 특이신호
if sGubun == "3":
logger.debug('OnReceiveChejanData: 특이신호 [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
pass
except Exception as e:
print('CTrade_OnReceiveChejanData Error ', e)
Telegram('[StockTrader]CTrade_OnReceiveChejanData Error : %s' % e, send='mc')
logger.error('CTrade_OnReceiveChejanData Error : %s' % e)
def OnReceiveRealData(self, sRealKey, sRealType, sRealData):
"""
OpenAPI 메뉴얼 참조
:param sRealKey:
:param sRealType:
:param sRealData:
:return:
"""
# logger.debug('OnReceiveRealData [%s] [%s] [%s]' % (sRealKey, sRealType, sRealData))
_now = datetime.datetime.now()
try:
if _now.strftime('%H:%M:%S') < '09:00:00': # 9시 이전 데이터 버림(장 시작 전에 테이터 들어오는 것도 많으므로 버리기 위함)
return
if sRealKey not in self.실시간종목리스트: # 리스트에 없는 데이터 버림
return
if sRealType == "주식시세" or sRealType == "주식체결":
param = dict()
param['종목코드'] = self.종목코드변환(sRealKey)
param['체결시간'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 20).strip()
param['현재가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 10).strip()
param['전일대비'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 11).strip()
param['등락률'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 12).strip()
param['매도호가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 27).strip()
param['매수호가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 28).strip()
param['누적거래량'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 13).strip()
param['시가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 16).strip()
param['고가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 17).strip()
param['저가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 18).strip()
param['거래회전율'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 31).strip()
param['시가총액'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 311).strip()
self.실시간데이터처리(param)
except Exception as e:
print('CTrade_OnReceiveRealData Error ', e)
Telegram('[StockTrader]CTrade_OnReceiveRealData Error : %s' % e, send='mc')
logger.error('CTrade_OnReceiveRealData Error : %s' % e)
def OnReceiveTrCondition(self, sScrNo, strCodeList, strConditionName, nIndex, nNext):
print('OnReceiveTrCondition')
try:
if strCodeList == "":
self.ConditionLoop.exit()
return []
self.codeList = strCodeList.split(';')
del self.codeList[-1]
# print(self.codeList)
logger.info("[%s]조건 검색 완료"%(self.sName))
self.ConditionLoop.exit()
print('OnReceiveTrCondition :', self.codeList)
return self.codeList
except Exception as e:
print("OnReceiveTrCondition_Error")
print(e)
def OnReceiveConditionVer(self, lRet, sMsg):
print('OnReceiveConditionVer')
try:
self.condition = self.getConditionNameList()
except Exception as e:
print("CTrade : OnReceiveConditionVer_Error")
finally:
self.ConditionLoop.exit()
def OnReceiveRealCondition(self, sTrCode, strType, strConditionName, strConditionIndex):
# print("CTrade : OnReceiveRealCondition")
# OpenAPI 메뉴얼 참조
# :param sTrCode:
# :param strType:
# :param strConditionName:
# :param strConditionIndex:
# :return:
_now = datetime.datetime.now().strftime('%H:%M:%S')
if (_now >= '10:00:00' and _now < '13:00:00') or _now >= '15:17:00': # 10시부터 13시 이전 데이터 버림, 15시 17분 당일 매도 처리 후 데이터 버림
return
# logger.info('OnReceiveRealCondition [%s] [%s] [%s] [%s]' % (sTrCode, strType, strConditionName, strConditionIndex))
print("실시간조검검색_종목코드: %s %s / Time : %s"%(sTrCode, "종목편입" if strType == "I" else "종목이탈", _now))
if strType == 'I':
self.실시간조건처리(sTrCode)
def 종목코드변환(self, code): # TR 통해서 받은 종목 코드에 A가 붙을 경우 삭제
return code.replace('A', '')
def 정량매수(self, sRQName, 종목코드, 매수가, 수량):
# sRQName = '정량매수%s' % self.sScreenNo
sScreenNo = self.GenScreenNO() # 주문을 낼때 마다 스크린번호를 생성
sAccNo = self.sAccount
nOrderType = 1 # (1:신규매수, 2:신규매도 3:매수취소, 4:매도취소, 5:매수정정, 6:매도정정)
sCode = 종목코드
nQty = 수량
nPrice = 매수가
sHogaGb = self.매수방법 # 00:지정가, 03:시장가, 05:조건부지정가, 06:최유리지정가, 07:최우선지정가, 10:지정가IOC, 13:시장가IOC, 16:최유리IOC, 20:지정가FOK, 23:시장가FOK, 26:최유리FOK, 61:장개시전시간외, 62:시간외단일가매매, 81:시간외종가
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo)
return ret
def 정액매수(self, sRQName, 종목코드, 매수가, 매수금액):
# sRQName = '정액매수%s' % self.sScreenNo
try:
sScreenNo = self.GenScreenNO()
sAccNo = self.sAccount
nOrderType = 1 # (1:신규매수, 2:신규매도 3:매수취소, 4:매도취소, 5:매수정정, 6:매도정정)
sCode = 종목코드
nQty = 매수금액 // 매수가
nPrice = 매수가
sHogaGb = self.매수방법 # 00:지정가, 03:시장가, 05:조건부지정가, 06:최유리지정가, 07:최우선지정가, 10:지정가IOC, 13:시장가IOC, 16:최유리IOC, 20:지정가FOK, 23:시장가FOK, 26:최유리FOK, 61:장개시전시간외, 62:시간외단일가매매, 81:시간외종가
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
# logger.debug('주문 - %s %s %s %s %s %s %s %s %s', sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo)
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb,
sOrgOrderNo)
return ret
except Exception as e:
print('CTrade_정액매수 Error ', e)
Telegram('[StockTrader]CTrade_정액매수 Error : %s' % e, send='mc')
logger.error('CTrade_정액매수 Error : %s' % e)
def 정량매도(self, sRQName, 종목코드, 매도가, 수량):
# sRQName = '정량매도%s' % self.sScreenNo
try:
sScreenNo = self.GenScreenNO()
sAccNo = self.sAccount
nOrderType = 2 # (1:신규매수, 2:신규매도 3:매수취소, 4:매도취소, 5:매수정정, 6:매도정정)
sCode = 종목코드
nQty = 수량
nPrice = 매도가
sHogaGb = self.매도방법 # 00:지정가, 03:시장가, 05:조건부지정가, 06:최유리지정가, 07:최우선지정가, 10:지정가IOC, 13:시장가IOC, 16:최유리IOC, 20:지정가FOK, 23:시장가FOK, 26:최유리FOK, 61:장개시전시간외, 62:시간외단일가매매, 81:시간외종가
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb,
sOrgOrderNo)
return ret
except Exception as e:
            print('[%s]정량매도 Error : %s' % (self.sName, e))
Telegram('[StockTrader][%s]정량매도 Error : %s' % (self.sName, e), send='mc')
logger.error('[%s]정량매도 Error : %s' % (self.sName, e))
def 정액매도(self, sRQName, 종목코드, 매도가, 수량):
# sRQName = '정액매도%s' % self.sScreenNo
sScreenNo = self.GenScreenNO()
sAccNo = self.sAccount
nOrderType = 2 # (1:신규매수, 2:신규매도 3:매수취소, 4:매도취소, 5:매수정정, 6:매도정정)
sCode = 종목코드
nQty = 수량
nPrice = 매도가
sHogaGb = self.매도방법 # 00:지정가, 03:시장가, 05:조건부지정가, 06:최유리지정가, 07:최우선지정가, 10:지정가IOC, 13:시장가IOC, 16:최유리IOC, 20:지정가FOK, 23:시장가FOK, 26:최유리FOK, 61:장개시전시간외, 62:시간외단일가매매, 81:시간외종가
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb,
sOrgOrderNo)
return ret
def 주문등록(self, sRQName, 주문번호):
self.주문번호_주문_매핑[주문번호] = sRQName
Ui_계좌정보조회, QtBaseClass_계좌정보조회 = uic.loadUiType("./UI/계좌정보조회.ui")
class 화면_계좌정보(QDialog, Ui_계좌정보조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_계좌정보, self).__init__(parent) # Initialize하는 형식
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['종목번호', '종목명', '현재가', '보유수량', '매입가', '매입금액', '평가금액', '수익률(%)', '평가손익', '매매가능수량']
self.보이는컬럼 = ['종목번호', '종목명', '현재가', '보유수량', '매입가', '매입금액', '평가금액', '수익률(%)', '평가손익', '매매가능수량'] # 주당 손익 -> 수익률(%)
self.result = []
self.KiwoomAccount()
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def KiwoomAccount(self):
ACCOUNT_CNT = self.kiwoom.dynamicCall('GetLoginInfo("ACCOUNT_CNT")')
ACC_NO = self.kiwoom.dynamicCall('GetLoginInfo("ACCNO")')
self.account = ACC_NO.split(';')[0:-1] # 계좌번호가 ;가 붙어서 나옴(에로 계좌가 3개면 111;222;333)
self.comboBox.clear()
self.comboBox.addItems(self.account)
logger.debug("보유 계좌수: %s 계좌번호: %s [%s]" % (ACCOUNT_CNT, self.account[0], ACC_NO))
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (
sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if sRQName == "계좌평가잔고내역요청":
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
# print(j)
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0')
# print(S)
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
row.append(S)
self.result.append(row)
# logger.debug("%s" % row)
if sPreNext == '2':
self.Request(_repeat=2)
else:
self.model.update(DataFrame(data=self.result, columns=self.보이는컬럼))
print(self.result)
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
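# TR responses arrive in pages: GetRepeatCnt returns the number of repeated records in the current
# block, and sPreNext == '2' means more data remains, so Request(_repeat=2) is issued to pull the
# next page. Only after the last page is the accumulated self.result pushed into the table model.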
def Request(self, _repeat=0):
계좌번호 = self.comboBox.currentText().strip()
logger.debug("계좌번호 %s" % 계좌번호)
# KOA StudioSA에서 opw00018 확인
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", 계좌번호) # 8132495511
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "비밀번호입력매체구분", '00')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "조회구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "계좌평가잔고내역요청", "opw00018", _repeat,'{:04d}'.format(self.sScreenNo))
# 조회 버튼(QtDesigner에서 조회버튼 누르고 오른쪽 하단에 시그널/슬롯편집기를 보면 조회버튼 시그널(clicked), 슬롯(Inquiry())로 확인가능함
def inquiry(self):
self.result = []
self.Request(_repeat=0)
def robot_account(self):
global 로봇거래계좌번호
로봇거래계좌번호 = self.comboBox.currentText().strip()
# sqlite3 사용
try:
with sqlite3.connect(DATABASE) as conn:
cursor = conn.cursor()
robot_account = pickle.dumps(로봇거래계좌번호, protocol=pickle.HIGHEST_PROTOCOL, fix_imports=True)
_robot_account = base64.encodebytes(robot_account)
cursor.execute("REPLACE into Setting(keyword, value) values (?, ?)",
['robotaccount', _robot_account])
conn.commit()
print("로봇 계좌 등록 완료")
except Exception as e:
print('robot_account', e)
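# The chosen robot trading account is persisted by pickling the string, base64-encoding the bytes,
# and upserting them into the sqlite 'Setting' table under the 'robotaccount' key; the loading side
# (elsewhere in this program) is assumed to reverse the base64 + pickle steps in the same way.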
Ui_일자별주가조회, QtBaseClass_일자별주가조회 = uic.loadUiType("./UI/일자별주가조회.ui")
class 화면_일별주가(QDialog, Ui_일자별주가조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_일별주가, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('일자별 주가 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['일자', '현재가', '거래량', '시가', '고가', '저가', '거래대금']
self.result = []
d = today
self.lineEdit_date.setText(str(d))
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
if sRQName == "주식일봉차트조회":
종목코드 = ''
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
row.append(S)
self.result.append(row)
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
df['종목코드'] = self.종목코드
self.model.update(df[['종목코드'] + self.columns])
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
self.종목코드 = self.lineEdit_code.text().strip()
기준일자 = self.lineEdit_date.text().strip().replace('-', '')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", self.종목코드)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "기준일자", 기준일자)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "수정주가구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "주식일봉차트조회", "OPT10081", _repeat,
'{:04d}'.format(self.sScreenNo))
def inquiry(self):
self.result = []
self.Request(_repeat=0)
Ui_분별주가조회, QtBaseClass_분별주가조회 = uic.loadUiType("./UI/분별주가조회.ui")
class 화면_분별주가(QDialog, Ui_분별주가조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_분별주가, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('분별 주가 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['체결시간', '현재가', '시가', '고가', '저가', '거래량']
self.result = []
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
print('화면_분별주가 : OnReceiveTrData')
if self.sScreenNo != int(sScrNo):
return
if sRQName == "주식분봉차트조회":
종목코드 = ''
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and (S[0] == '-' or S[0] == '+'):
S = S[1:].lstrip('0')
row.append(S)
self.result.append(row)
# df = DataFrame(data=self.result, columns=self.columns)
# df.to_csv('분봉.csv', encoding='euc-kr')
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
df.to_csv('분봉.csv', encoding='euc-kr', index=False)
df['종목코드'] = self.종목코드
self.model.update(df[['종목코드'] + self.columns])
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
self.종목코드 = self.lineEdit_code.text().strip()
틱범위 = self.comboBox_min.currentText()[0:2].strip()
if 틱범위[0] == '0':
틱범위 = 틱범위[1:]
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", self.종목코드)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "틱범위", 틱범위)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "수정주가구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "주식분봉차트조회", "OPT10080", _repeat,
'{:04d}'.format(self.sScreenNo))
def inquiry(self):
self.result = []
self.Request(_repeat=0)
Ui_업종정보, QtBaseClass_업종정보 = uic.loadUiType("./UI/업종정보조회.ui")
class 화면_업종정보(QDialog, Ui_업종정보):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_업종정보, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('업종정보 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['종목코드', '종목명', '현재가', '대비기호', '전일대비', '등락률', '거래량', '비중', '거래대금', '상한', '상승', '보합', '하락', '하한',
'상장종목수']
self.result = []
d = today
self.lineEdit_date.setText(str(d))
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage,
sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
if sRQName == "업종정보조회":
종목코드 = ''
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
row.append(S)
self.result.append(row)
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
df['업종코드'] = self.업종코드
df.to_csv("업종정보.csv")
self.model.update(df[['업종코드'] + self.columns])
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
self.업종코드 = self.lineEdit_code.text().strip()
기준일자 = self.lineEdit_date.text().strip().replace('-', '')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "업종코드", self.업종코드)
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "업종정보조회", "OPT20003", _repeat,
'{:04d}'.format(self.sScreenNo))
def inquiry(self):
self.result = []
self.Request(_repeat=0)
Ui_업종별주가조회, QtBaseClass_업종별주가조회 = uic.loadUiType("./UI/업종별주가조회.ui")
class 화면_업종별주가(QDialog, Ui_업종별주가조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_업종별주가, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('업종별 주가 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['현재가', '거래량', '일자', '시가', '고가', '저가', '거래대금', '대업종구분', '소업종구분', '종목정보', '수정주가이벤트', '전일종가']
self.result = []
d = today
self.lineEdit_date.setText(str(d))
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage,
sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
if sRQName == "업종일봉조회":
종목코드 = ''
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
row.append(S)
self.result.append(row)
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
df['업종코드'] = self.업종코드
self.model.update(df[['업종코드'] + self.columns])
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
self.업종코드 = self.lineEdit_code.text().strip()
기준일자 = self.lineEdit_date.text().strip().replace('-', '')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "업종코드", self.업종코드)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "기준일자", 기준일자)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "수정주가구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "업종일봉조회", "OPT20006", _repeat,
'{:04d}'.format(self.sScreenNo))
def inquiry(self):
self.result = []
self.Request(_repeat=0)
class 화면_종목별투자자(QDialog, Ui_일자별주가조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_종목별투자자, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('종목별 투자자 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['일자', '현재가', '전일대비', '누적거래대금', '개인투자자', '외국인투자자', '기관계', '금융투자', '보험', '투신', '기타금융', '은행',
'연기금등', '국가', '내외국인', '사모펀드', '기타법인']
self.result = []
d = today
self.lineEdit_date.setText(str(d))
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
if sRQName == "종목별투자자조회":
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
row.append(S)
self.result.append(row)
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
df['종목코드'] = self.lineEdit_code.text().strip()
df_new = df[['종목코드'] + self.columns]
self.model.update(df_new)
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
종목코드 = self.lineEdit_code.text().strip()
기준일자 = self.lineEdit_date.text().strip().replace('-', '')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "일자", 기준일자)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", 종목코드)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, int)', "금액수량구분", 2) # 1:금액, 2:수량
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, int)', "매매구분", 0) # 0:순매수, 1:매수, 2:매도
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, int)', "단위구분", 1) # 1000:천주, 1:단주
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "종목별투자자조회", "OPT10060", _repeat,
'{:04d}'.format(self.sScreenNo))
def inquiry(self):
self.result = []
self.Request(_repeat=0)
Ui_TradeShortTerm, QtBaseClass_TradeShortTerm = uic.loadUiType("./UI/TradeShortTerm.ui")
class 화면_TradeShortTerm(QDialog, Ui_TradeShortTerm):
def __init__(self, parent):
super(화면_TradeShortTerm, self).__init__(parent)
self.setupUi(self)
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.result = []
def inquiry(self):
# Google spreadsheet 사용
try:
self.data = import_googlesheet()
print(self.data)
self.model.update(self.data)
for i in range(len(self.data.columns)):
self.tableView.resizeColumnToContents(i)
except Exception as e:
print('화면_TradeShortTerm : inquiry Error ', e)
logger.error('화면_TradeShortTerm : inquiry Error : %s' % e)
class CTradeShortTerm(CTrade): # 로봇 추가 시 __init__ : 복사, Setting, 초기조건:전략에 맞게, 데이터처리~Run:복사
def __init__(self, sName, UUID, kiwoom=None, parent=None):
self.sName = sName
self.UUID = UUID
self.sAccount = None
self.kiwoom = kiwoom
self.parent = parent
self.running = False
self.주문결과 = dict()
self.주문번호_주문_매핑 = dict()
self.주문실행중_Lock = dict()
self.portfolio = dict()
self.실시간종목리스트 = []
self.매수모니터링체크 = False
self.SmallScreenNumber = 9999
self.d = today
# 구글 스프레드시트에서 읽은 DataFrame에서 로봇별 종목리스트 셋팅
def set_stocklist(self, data):
self.Stocklist = dict()
self.Stocklist['컬럼명'] = list(data.columns)
for 종목코드 in data['종목코드'].unique():
temp_list = data[data['종목코드'] == 종목코드].values[0]
self.Stocklist[종목코드] = {
'번호': temp_list[self.Stocklist['컬럼명'].index('번호')],
'종목명': temp_list[self.Stocklist['컬럼명'].index('종목명')],
'종목코드': 종목코드,
'시장': temp_list[self.Stocklist['컬럼명'].index('시장')],
'투자비중': float(temp_list[self.Stocklist['컬럼명'].index('비중')]), # 저장 후 setting 함수에서 전략의 단위투자금을 곱함
'시가위치': list(map(float, temp_list[self.Stocklist['컬럼명'].index('시가위치')].split(','))),
'매수가': list(
int(float(temp_list[list(data.columns).index(col)].replace(',', ''))) for col in data.columns if
'매수가' in col and temp_list[list(data.columns).index(col)] != ''),
'매도전략': temp_list[self.Stocklist['컬럼명'].index('기본매도전략')],
'매도가': list(
int(float(temp_list[list(data.columns).index(col)].replace(',', ''))) for col in data.columns if
'목표가' in col and temp_list[list(data.columns).index(col)] != '')
}
return self.Stocklist
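# Assumed Google-sheet layout feeding set_stocklist (the sheet itself is not shown in this file):
# one row per stock with at least the columns 번호, 종목명, 종목코드, 시장, 비중, 시가위치, 기본매도전략,
# plus one or more '매수가N' and '목표가N' columns; empty 매수가/목표가 cells are skipped when the
# price lists above are built, and 시가위치 is a comma-separated lower/upper bound in percent.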
# RobotAdd 함수에서 초기화 다음 셋팅 실행해서 설정값 넘김
def Setting(self, sScreenNo, 매수방법='00', 매도방법='03', 종목리스트=pd.DataFrame()):
try:
self.sScreenNo = sScreenNo
self.실시간종목리스트 = []
self.매수방법 = 매수방법
self.매도방법 = 매도방법
self.종목리스트 = 종목리스트
self.Stocklist = self.set_stocklist(self.종목리스트) # 번호, 종목명, 종목코드, 시장, 비중, 시가위치, 매수가, 매도전략, 매도가
self.Stocklist['전략'] = {
'단위투자금': '',
'모니터링종료시간': '',
'보유일': '',
'투자금비중': '',
'매도구간별조건': [],
'전략매도가': [],
}
row_data = shortterm_strategy_sheet.get_all_values()
for data in row_data:
if data[0] == '단위투자금':
self.Stocklist['전략']['단위투자금'] = int(data[1])
elif data[0] == '매수모니터링 종료시간':
if len(data[1][:-3]) == 1:
data[1] = '0' + data[1]
self.Stocklist['전략']['모니터링종료시간'] = data[1] + ':00'
elif data[0] == '보유일':
self.Stocklist['전략']['보유일'] = int(data[1])
elif data[0] == '투자금 비중':
self.Stocklist['전략']['투자금비중'] = float(data[1][:-1])
# elif data[0] == '손절율':
# self.Stocklist['전략']['매도구간별조건'].append(float(data[1][:-1]))
# elif data[0] == '시가 위치':
# self.Stocklist['전략']['시가위치'] = list(map(int, data[1].split(',')))
elif '구간' in data[0]:
if data[0][-1] != '1' and data[0][-1] != '2':
self.Stocklist['전략']['매도구간별조건'].append(float(data[1][:-1]))
elif '손절가' == data[0]:
self.Stocklist['전략']['전략매도가'].append(float(data[1].replace('%', '')))
elif '본전가' == data[0]:
self.Stocklist['전략']['전략매도가'].append(float(data[1].replace('%', '')))
elif '익절가' in data[0]:
self.Stocklist['전략']['전략매도가'].append(float(data[1].replace('%', '')))
self.Stocklist['전략']['매도구간별조건'].insert(0, self.Stocklist['전략']['전략매도가'][0]) # 손절가
self.Stocklist['전략']['매도구간별조건'].insert(1, self.Stocklist['전략']['전략매도가'][1]) # 본전가
for code in self.Stocklist.keys():
if code == '컬럼명' or code == '전략':
continue
else:
self.Stocklist[code]['단위투자금'] = int(
self.Stocklist[code]['투자비중'] * self.Stocklist['전략']['단위투자금'])
self.Stocklist[code]['시가체크'] = False
self.Stocklist[code]['매수상한도달'] = False
self.Stocklist[code]['매수조건'] = 0
self.Stocklist[code]['매수총수량'] = 0 # 분할매수에 따른 수량체크
self.Stocklist[code]['매수수량'] = 0 # 분할매수 단위
self.Stocklist[code]['매수주문완료'] = 0 # 분할매수에 따른 매수 주문 수
self.Stocklist[code]['매수가전략'] = len(self.Stocklist[code]['매수가']) # 매수 전략에 따른 매수가 지정 수량
if self.Stocklist[code]['매도전략'] == '4':
self.Stocklist[code]['매도가'].append(self.Stocklist['전략']['전략매도가'])
print(self.Stocklist)
except Exception as e:
print('CTradeShortTerm_Setting Error :', e)
Telegram('[XTrader]CTradeShortTerm_Setting Error : %s' % e, send='mc')
logger.error('CTradeShortTerm_Setting Error : %s' % e)
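# After Setting() the Stocklist dict holds, per stock code, the per-stock budget (단위투자금 =
# 투자비중 * strategy-level 단위투자금), runtime flags (시가체크, 매수상한도달) and the split-buy
# counters (매수총수량, 매수수량, 매수주문완료, 매수가전략); the '전략' entry carries the sheet-wide
# sell bands (매도구간별조건) and strategy sell levels (전략매도가) read from shortterm_strategy_sheet.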
# 수동 포트폴리오 생성
def manual_portfolio(self):
self.portfolio = dict()
self.Stocklist = {
'024840': {'번호': '8.030', '종목명': 'KBI메탈', '종목코드': '024840', '시장': 'KOSDAQ', '매수전략': '1', '매수가': [1468],
'매수조건': 2, '수량': 310, '매도전략': '1', '매도가': [], '매수일': '2020/08/26 09:56:54'},
'097800': {'번호': '7.099', '종목명': '윈팩', '종목코드': '097800', '시장': 'KOSDAQ', '매수전략': '1', '매수가': [3219],
'매수조건': 1, '수량': 310, '매도전략': '4', '매도가': [3700], '매수일': '2020/05/29 09:22:39'},
'297090': {'번호': '7.101', '종목명': '씨에스베어링', '종목코드': '297090', '시장': 'KOSDAQ', '매수전략': '1', '매수가': [5000],
'매수조건': 3, '수량': 15, '매도전략': '2', '매도가': [], '매수일': '2020/06/03 09:12:15'},
}
self.strategy = {'전략': {'단위투자금': 200000, '모니터링종료시간': '10:30:00', '보유일': 20,
'투자금비중': 70.0, '매도구간별조건': [-2.7, 0.3, -3.0, -4.0, -5.0, -7.0],
'전략매도가': [-2.7, 0.3, 3.0, 6.0]}}
for code in list(self.Stocklist.keys()):
self.portfolio[code] = CPortStock_ShortTerm(번호=self.Stocklist[code]['번호'], 종목코드=code,
종목명=self.Stocklist[code]['종목명'],
시장=self.Stocklist[code]['시장'],
매수가=self.Stocklist[code]['매수가'][0],
매수조건=self.Stocklist[code]['매수조건'],
보유일=self.strategy['전략']['보유일'],
매도전략=self.Stocklist[code]['매도전략'],
매도가=self.Stocklist[code]['매도가'],
매도구간별조건=self.strategy['전략']['매도구간별조건'], 매도구간=1,
수량=self.Stocklist[code]['수량'],
매수일=self.Stocklist[code]['매수일'])
# google spreadsheet 매매이력 생성
def save_history(self, code, status):
# 매매이력 sheet에 해당 종목(매수된 종목)이 있으면 row를 반환 아니면 예외처리 -> 신규 매수로 처리
# 매수 이력 : 체결처리, 매수, 미체결수량 0에서 이력 저장
# 매도 이력 : 체결처리, 매도, 미체결수량 0에서 이력 저장
if status == '매도모니터링':
row = []
row.append(self.portfolio[code].번호)
row.append(self.portfolio[code].종목명)
row.append(self.portfolio[code].매수가)
shortterm_sell_sheet.append_row(row)
try:
code_row = shortterm_history_sheet.findall(self.portfolio[code].종목명)[-1].row # 종목명이 있는 모든 셀을 찾아서 맨 아래에 있는 셀을 선택
cell = alpha_list[shortterm_history_cols.index('매도가')] + str(code_row) # 매수 이력에 있는 종목이 매도가 되었는지 확인
sell_price = shortterm_history_sheet.acell(str(cell)).value
# 매도 이력은 추가 매도(매도전략2의 경우)나 신규 매도인 경우라 매도 이력 유무와 상관없음
if status == '매도': # 매도 이력은 포트폴리오에서 종목 pop을 하므로 Stocklist 데이터 사용
cell = alpha_list[shortterm_history_cols.index('매도가')] + str(code_row)
shortterm_history_sheet.update_acell(cell, self.portfolio[code].매도체결가)
cell = alpha_list[shortterm_history_cols.index('매도수량')] + str(code_row)
수량 = shortterm_history_sheet.acell(cell).value # 분할 매도의 경우 이전 매도 수량이 기록되어 있음
if 수량 != '': self.portfolio[code].매도수량 += int(수량) # 매도수량은 주문 수량이므로 기존 수량을 합해줌
shortterm_history_sheet.update_acell(cell, self.portfolio[code].매도수량)
cell = alpha_list[shortterm_history_cols.index('매도일')] + str(code_row)
shortterm_history_sheet.update_acell(cell, datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
cell = alpha_list[shortterm_history_cols.index('매도전략')] + str(code_row)
shortterm_history_sheet.update_acell(cell, self.portfolio[code].매도전략)
cell = alpha_list[shortterm_history_cols.index('매도구간')] + str(code_row)
shortterm_history_sheet.update_acell(cell, self.portfolio[code].매도구간)
계산수익률 = round((self.portfolio[code].매도체결가 / self.portfolio[code].매수가 - 1) * 100, 2)
cell = alpha_list[shortterm_history_cols.index('수익률(계산)')] + str(code_row) # 수익률 계산
shortterm_history_sheet.update_acell(cell, 계산수익률)
# 매수 이력은 있으나 매도 이력이 없음 -> 매도 전 추가 매수
if sell_price == '':
if status == '매수': # 포트폴리오 데이터 사용
cell = alpha_list[shortterm_history_cols.index('매수가')] + str(code_row)
shortterm_history_sheet.update_acell(cell, self.portfolio[code].매수가)
cell = alpha_list[shortterm_history_cols.index('매수수량')] + str(code_row)
shortterm_history_sheet.update_acell(cell, self.portfolio[code].수량)
cell = alpha_list[shortterm_history_cols.index('매수일')] + str(code_row)
shortterm_history_sheet.update_acell(cell, self.portfolio[code].매수일)
cell = alpha_list[shortterm_history_cols.index('매수조건')] + str(code_row)
shortterm_history_sheet.update_acell(cell, self.portfolio[code].매수조건)
else: # 매도가가 기록되어 거래가 완료된 종목으로 판단하여 예외발생으로 신규 매수 추가함
raise Exception('매매완료 종목')
except Exception as e:
try:
# logger.debug('CTradeShortTerm_save_history Error1 : 종목명:%s, %s' % (self.portfolio[code].종목명, e))
row = []
row_buy = []
if status == '매수':
row.append(self.portfolio[code].번호)
row.append(self.portfolio[code].종목명)
row.append(self.portfolio[code].매수가)
row.append(self.portfolio[code].수량)
row.append(self.portfolio[code].매수일)
row.append(self.portfolio[code].매수조건)
shortterm_history_sheet.append_row(row)
except Exception as e:
print('CTradeShortTerm_save_history Error2 : 종목명:%s, %s' % (self.portfolio[code].종목명, e))
Telegram('[XTrade]CTradeShortTerm_save_history Error2 : 종목명:%s, %s' % (self.portfolio[code].종목명, e),
send='mc')
logger.error('CTradeShortTerm_save_history Error : 종목명:%s, %s' % (self.portfolio[code].종목명, e))
# 매수 전략별 매수 조건 확인
def buy_strategy(self, code, price):
result = False
condition = self.Stocklist[code]['매수조건'] # 초기값 0
qty = self.Stocklist[code]['매수수량'] # 초기값 0
현재가, 시가, 고가, 저가, 전일종가 = price # 시세 = [현재가, 시가, 고가, 저가, 전일종가]
매수가 = self.Stocklist[code]['매수가'] # [매수가1, 매수가2, 매수가3]
시가위치하한 = self.Stocklist[code]['시가위치'][0]
시가위치상한 = self.Stocklist[code]['시가위치'][1]
# 1. 금일시가 위치 체크(초기 한번)하여 매수조건(1~6)과 주문 수량 계산
if self.Stocklist[code]['시가체크'] == False: # 종목별로 초기에 한번만 시가 위치 체크를 하면 되므로 별도 함수 미사용
매수가.append(시가)
매수가.sort(reverse=True)
band = 매수가.index(시가) # band = 0 : 매수가1 이상, band=1: 매수가1, 2 사이, band=2: 매수가2,3 사이
매수가.remove(시가)
if band == len(매수가): # 매수가 지정한 구간보다 시가가 아래일 경우로 초기값이 result=False, condition=0 리턴
self.Stocklist[code]['시가체크'] = True
self.Stocklist[code]['매수조건'] = 0
self.Stocklist[code]['매수수량'] = 0
return False, 0, 0
else:
# 단위투자금으로 매수가능한 총 수량 계산, band = 0 : 매수가1, band=1: 매수가2, band=2: 매수가3 로 계산
self.Stocklist[code]['매수총수량'] = self.Stocklist[code]['단위투자금'] // 매수가[band]
if band == 0: # 시가가 매수가1보다 높은 경우
# 시가가 매수가1의 시가범위에 포함 : 조건 1, 2, 3
if 매수가[band] * (1 + 시가위치하한 / 100) <= 시가 and 시가 < 매수가[band] * (1 + 시가위치상한 / 100):
condition = len(매수가)
self.Stocklist[code]['매수가전략'] = len(매수가)
qty = self.Stocklist[code]['매수총수량'] // condition
else: # 시가 위치에 미포함
self.Stocklist[code]['시가체크'] = True
self.Stocklist[code]['매수조건'] = 0
self.Stocklist[code]['매수수량'] = 0
return False, 0, 0
else: # 시가가 매수가 중간인 경우 - 매수가1&2사이(band 1) : 조건 4,5 / 매수가2&3사이(band 2) : 조건 6
for i in range(band): # band 1일 경우 매수가 1은 불필요하여 삭제, band 2 : 매수가 1, 2 삭제(band수 만큼 삭제 실행)
매수가.pop(0)
if 매수가[0] * (1 + 시가위치하한 / 100) <= 시가: # 시가범위 포함
# 조건 4 = 매수가길이 1 + band 1 + 2(=band+1) -> 4 = 1 + 2*1 + 1
# 조건 5 = 매수가길이 2 + band 1 + 2(=band+1) -> 5 = 2 + 2*1 + 1
# 조건 6 = 매수가길이 1 + band 2 + 3(=band+1) -> 6 = 1 + 2*2 + 1
condition = len(매수가) + (2 * band) + 1
self.Stocklist[code]['매수가전략'] = len(매수가)
qty = self.Stocklist[code]['매수총수량'] // (condition % 2 + 1)
else:
self.Stocklist[code]['시가체크'] = True
self.Stocklist[code]['매수조건'] = 0
self.Stocklist[code]['매수수량'] = 0
return False, 0, 0
self.Stocklist[code]['시가체크'] = True
self.Stocklist[code]['매수조건'] = condition
self.Stocklist[code]['매수수량'] = qty
else: # 시가 위치 체크를 한 두번째 데이터 이후에는 condition이 0이면 바로 매수 불만족 리턴시킴
if condition == 0: # condition 0은 매수 조건 불만족
return False, 0, 0
# 매수조건 확정, 매수 수량 계산 완료
# 매수상한에 미도달한 상태로 매수가로 내려왔을 때 매수
# 현재가가 해당조건에서의 시가위치 상한 이상으로 오르면 매수상한도달을 True로 해서 매수하지 않게 함
if 현재가 >= 매수가[0] * (1 + 시가위치상한 / 100): self.Stocklist[code]['매수상한도달'] = True
if self.Stocklist[code]['매수주문완료'] < self.Stocklist[code]['매수가전략'] and self.Stocklist[code]['매수상한도달'] == False:
if 현재가 == 매수가[0]:
result = True
self.Stocklist[code]['매수주문완료'] += 1
print("매수모니터링 만족_종목:%s, 시가:%s, 조건:%s, 현재가:%s, 체크결과:%s, 수량:%s" % (
self.Stocklist[code]['종목명'], 시가, condition, 현재가, result, qty))
logger.debug("매수모니터링 만족_종목:%s, 시가:%s, 조건:%s, 현재가:%s, 체크결과:%s, 수량:%s" % (
self.Stocklist[code]['종목명'], 시가, condition, 현재가, result, qty))
return result, condition, qty
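# Worked example of the buy-condition numbering above (illustrative numbers, not from the sheet):
# with 매수가 = [10000, 9500, 9000], 시가 = 9700 and 시가위치 = [0, 15], the open sits between
# 매수가1 and 매수가2, so band = 1; the first price is dropped, 9500 * 1.00 <= 9700 holds, and
# condition = len([9500, 9000]) + 2*1 + 1 = 5 with qty = 매수총수량 // (5 % 2 + 1) = 매수총수량 // 2.
# A buy is then triggered only when 현재가 trades exactly at 9500 (the new 매수가[0]), unless the
# price has already run above 9500 * 1.15, in which case 매수상한도달 blocks further buying.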
# 매도 구간 확인
def profit_band_check(self, 현재가, 매수가):
band_list = [0, 3, 5, 10, 15, 25]
# print('현재가, 매수가', 현재가, 매수가)
ratio = round((현재가 - 매수가) / 매수가 * 100, 2)
# print('ratio', ratio)
if ratio < 3:
return 1
elif ratio in band_list:
return band_list.index(ratio) + 1
else:
band_list.append(ratio)
band_list.sort()
band = band_list.index(ratio)
band_list.remove(ratio)
return band
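# Example: 매수가 10000 and 현재가 10700 give ratio = 7.0; 7.0 is not in [0, 3, 5, 10, 15, 25], so it
# is inserted, the sorted list becomes [0, 3, 5, 7.0, 10, 15, 25] and its index 3 is returned,
# i.e. the position currently sits in profit band 3 (between +5% and +10%).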
# 매도 전략별 매도 조건 확인
def sell_strategy(self, code, price):
# print('%s 매도 조건 확인' % code)
try:
result = False
band = self.portfolio[code].매도구간 # 이전 매도 구간 받음
매도방법 = self.매도방법 # '03' : 시장가
qty_ratio = 1 # 매도 수량 결정 : 보유수량 * qty_ratio
현재가, 시가, 고가, 저가, 전일종가 = price # 시세 = [현재가, 시가, 고가, 저가, 전일종가]
매수가 = self.portfolio[code].매수가
# 전략 1, 2, 3과 4 별도 체크
strategy = self.portfolio[code].매도전략
# 전략 1, 2, 3
if strategy != '4':
# 매도를 위한 수익률 구간 체크(매수가 대비 현재가의 수익률 조건에 다른 구간 설정)
new_band = self.profit_band_check(현재가, 매수가)
if (hogacal(시가, 0, self.portfolio[code].시장, '상한가')) <= 현재가:
band = 7
if band < new_band: # 이전 구간보다 현재 구간이 높을 경우(시세가 올라간 경우)만
band = new_band # 구간을 현재 구간으로 변경(반대의 경우는 구간 유지)
if band == 1 and 현재가 <= 매수가 * (1 + (self.portfolio[code].매도구간별조건[0] / 100)):
result = True
elif band == 2 and 현재가 <= 매수가 * (1 + (self.portfolio[code].매도구간별조건[1] / 100)):
result = True
elif band == 3 and 현재가 <= 고가 * (1 + (self.portfolio[code].매도구간별조건[2] / 100)):
result = True
elif band == 4 and 현재가 <= 고가 * (1 + (self.portfolio[code].매도구간별조건[3] / 100)):
result = True
elif band == 5 and 현재가 <= 고가 * (1 + (self.portfolio[code].매도구간별조건[4] / 100)):
result = True
elif band == 6 and 현재가 <= 고가 * (1 + (self.portfolio[code].매도구간별조건[5] / 100)):
result = True
elif band == 7 and 현재가 >= (hogacal(시가, -3, self.Stocklist[code]['시장'], '상한가')):
매도방법 = '00' # 지정가
result = True
self.portfolio[code].매도구간 = band # 포트폴리오에 매도구간 업데이트
try:
if strategy == '2' or strategy == '3': # 매도전략 2(기존 5)
if strategy == '2':
목표가 = self.portfolio[code].매도가[0]
elif strategy == '3':
목표가 = (hogacal(시가 * 1.1, 0, self.Stocklist[code]['시장'], '현재가'))
매도조건 = self.portfolio[code].매도조건 # 매도가 실행된 조건 '': 매도 전, 'B':구간매도, 'T':목표가매도
target_band = self.profit_band_check(목표가, 매수가)
if band < target_band: # 현재가구간이 목표가구간 미만일때 전량매도
qty_ratio = 1
else: # 현재가구간이 목표가구간 이상일 때
if 현재가 == 목표가: # 목표가 도달 시 절반 매도
self.portfolio[code].목표도달 = True # 목표가 도달 여부 True
if 매도조건 == '': # 매도이력이 없는 경우 목표가매도 'T', 절반 매도
self.portfolio[code].매도조건 = 'T'
result = True
if self.portfolio[code].수량 == 1:
qty_ratio = 1
else:
qty_ratio = 0.5
elif 매도조건 == 'B': # 구간 매도 이력이 있을 경우 절반매도가 된 상태이므로 남은 전량매도
result = True
qty_ratio = 1
elif 매도조건 == 'T': # 목표가 매도 이력이 있을 경우 매도미실행
result = False
else: # 현재가가 목표가가 아닐 경우 구간 매도 실행(매도실행여부는 결정된 상태)
if self.portfolio[code].목표도달 == False: # 목표가 도달을 못한 경우면 전량매도
qty_ratio = 1
else:
if 매도조건 == '': # 매도이력이 없는 경우 구간매도 'B', 절반 매도
self.portfolio[code].매도조건 = 'B'
if self.portfolio[code].수량 == 1:
qty_ratio = 1
else:
qty_ratio = 0.5
elif 매도조건 == 'B': # 구간 매도 이력이 있을 경우 매도미실행
result = False
elif 매도조건 == 'T': # 목표가 매도 이력이 있을 경우 전량매도
qty_ratio = 1
except Exception as e:
print('sell_strategy 매도전략 2 Error :', e)
logger.error('CTradeShortTerm_sell_strategy 종목 : %s 매도전략 2 Error : %s' % (code, e))
Telegram('[XTrader]CTradeShortTerm_sell_strategy 종목 : %s 매도전략 2 Error : %s' % (code, e), send='mc')
result = False
# print('종목코드 : %s, 현재가 : %s, 시가 : %s, 고가 : %s, 매도구간 : %s, 결과 : %s' % (code, 현재가, 시가, 고가, band, result))
return 매도방법, result, qty_ratio
# 전략 4(지정가 00 매도)
else:
매도방법 = '00' # 지정가
try:
# 전략 4의 매도가 = [목표가(원), [손절가(%), 본전가(%), 1차익절가(%), 2차익절가(%)]]
# 1. 매수 후 손절가까지 하락시 매도주문 -> 손절가, 전량매도로 끝
if 현재가 <= 매수가 * (1 + self.portfolio[code].매도가[1][0] / 100):
self.portfolio[code].매도구간 = 0
result = True
qty_ratio = 1
# 2. 1차익절가 도달시 매도주문 -> 1차익절가, 1/3 매도
elif self.portfolio[code].익절가1도달 == False and 현재가 >= 매수가 * (
1 + self.portfolio[code].매도가[1][2] / 100):
self.portfolio[code].매도구간 = 1
self.portfolio[code].익절가1도달 = True
result = True
if self.portfolio[code].수량 == 1:
qty_ratio = 1
elif self.portfolio[code].수량 == 2:
qty_ratio = 0.5
else:
qty_ratio = 0.3
# 3. 2차익절가 도달못하고 본전가까지 하락 또는 고가 -3%까지시 매도주문 -> 1차익절가, 나머지 전량 매도로 끝
elif self.portfolio[code].익절가1도달 == True and self.portfolio[code].익절가2도달 == False and (
(현재가 <= 매수가 * (1 + self.portfolio[code].매도가[1][1] / 100)) or (현재가 <= 고가 * 0.97)):
self.portfolio[code].매도구간 = 1.5
result = True
qty_ratio = 1
# 4. 2차 익절가 도달 시 매도주문 -> 2차 익절가, 1/3 매도
elif self.portfolio[code].익절가1도달 == True and self.portfolio[code].익절가2도달 == False and 현재가 >= 매수가 * (
1 + self.portfolio[code].매도가[1][3] / 100):
self.portfolio[code].매도구간 = 2
self.portfolio[code].익절가2도달 = True
result = True
if self.portfolio[code].수량 == 1:
qty_ratio = 1
else:
qty_ratio = 0.5
# 5. 목표가 도달못하고 2차익절가까지 하락 시 매도주문 -> 2차익절가, 나머지 전량 매도로 끝
elif self.portfolio[code].익절가2도달 == True and self.portfolio[code].목표가도달 == False and (
(현재가 <= 매수가 * (1 + self.portfolio[code].매도가[1][2] / 100)) or (현재가 <= 고가 * 0.97)):
self.portfolio[code].매도구간 = 2.5
result = True
qty_ratio = 1
# 6. 목표가 도달 시 매도주문 -> 목표가, 나머지 전량 매도로 끝
elif self.portfolio[code].목표가도달 == False and 현재가 >= self.portfolio[code].매도가[0]:
self.portfolio[code].매도구간 = 3
self.portfolio[code].목표가도달 = True
result = True
qty_ratio = 1
return 매도방법, result, qty_ratio
except Exception as e:
print('sell_strategy 매도전략 4 Error :', e)
logger.error('CTradeShortTerm_sell_strategy 종목 : %s 매도전략 4 Error : %s' % (code, e))
Telegram('[XTrader]CTradeShortTerm_sell_strategy 종목 : %s 매도전략 4 Error : %s' % (code, e), send='mc')
result = False
return 매도방법, result, qty_ratio
except Exception as e:
print('CTradeShortTerm_sell_strategy Error ', e)
Telegram('[XTrader]CTradeShortTerm_sell_strategy Error : %s' % e, send='mc')
logger.error('CTradeShortTerm_sell_strategy Error : %s' % e)
result = False
qty_ratio = 1
return 매도방법, result, qty_ratio
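# Summary of the sell logic above: for strategies 1-3 the profit band (1: under +3%, 2: +3~5%,
# 3: +5~10%, 4: +10~15%, 5: +15~25%, 6: over +25%, 7: limit-up) only ratchets upward, and a sell
# fires when the price falls back to that band's trailing threshold (bands 1-2 measured against the
# buy price, bands 3-6 against the intraday high); strategies 2/3 additionally split the exit between
# a target-price fill ('T') and a band exit ('B'). Strategy 4 instead walks fixed levels: stop-loss,
# 1st and 2nd take-profit (partial sells), and the final target, liquidating the remainder on pullbacks.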
# 보유일 전략 : 보유기간이 보유일 이상일 경우 전량 매도 실행(Mainwindow 타이머에서 시간 체크)
def hold_strategy(self):
if self.holdcheck == True:
print('보유일 만기 매도 체크')
try:
for code in list(self.portfolio.keys()):
보유기간 = holdingcal(self.portfolio[code].매수일)
print('종목명 : %s, 보유일 : %s, 보유기간 : %s' % (self.portfolio[code].종목명, self.portfolio[code].보유일, 보유기간))
if 보유기간 >= int(self.portfolio[code].보유일) and self.주문실행중_Lock.get('S_%s' % code) is None and \
self.portfolio[code].수량 != 0:
self.portfolio[code].매도구간 = 0
(result, order) = self.정량매도(sRQName='S_%s' % code, 종목코드=code, 매도가=self.portfolio[code].매수가,
수량=self.portfolio[code].수량)
if result == True:
self.주문실행중_Lock['S_%s' % code] = True
Telegram('[XTrader]정량매도(보유일만기) : 종목코드=%s, 종목명=%s, 수량=%s' % (
code, self.portfolio[code].종목명, self.portfolio[code].수량))
logger.info('정량매도(보유일만기) : 종목코드=%s, 종목명=%s, 수량=%s' % (
code, self.portfolio[code].종목명, self.portfolio[code].수량))
else:
Telegram('[XTrader]정량매도실패(보유일만기) : 종목코드=%s, 종목명=%s, 수량=%s' % (
code, self.portfolio[code].종목명, self.portfolio[code].수량))
logger.info('정량매도실패(보유일만기) : 종목코드=%s, 종목명=%s, 수량=%s' % (
code, self.portfolio[code].종목명, self.portfolio[code].수량))
except Exception as e:
print("hold_strategy Error :", e)
# 포트폴리오 생성
def set_portfolio(self, code, buyprice, condition):
try:
self.portfolio[code] = CPortStock_ShortTerm(번호=self.Stocklist[code]['번호'], 종목코드=code,
종목명=self.Stocklist[code]['종목명'],
시장=self.Stocklist[code]['시장'], 매수가=buyprice,
매수조건=condition, 보유일=self.Stocklist['전략']['보유일'],
매도전략=self.Stocklist[code]['매도전략'],
매도가=self.Stocklist[code]['매도가'],
매도구간별조건=self.Stocklist['전략']['매도구간별조건'],
매수일=datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
self.Stocklist[code]['매수일'] = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') # 매매이력 업데이트를 위해 매수일 추가
except Exception as e:
print('CTradeShortTerm_set_portfolio Error ', e)
Telegram('[XTrader]CTradeShortTerm_set_portfolio Error : %s' % e, send='mc')
logger.error('CTradeShortTerm_set_portfolio Error : %s' % e)
# Robot_Run이 되면 실행됨 - 매수/매도 종목을 리스트로 저장
def 초기조건(self, codes):
# 매수총액 계산하기
# 금일매도종목 리스트 변수 초기화
# 매도할종목 : 포트폴리오에 있던 종목 추가
# 매수할종목 : 구글에서 받은 종목 추가
self.parent.statusbar.showMessage("[%s] 초기조건준비" % (self.sName))
self.금일매도종목 = [] # 장 마감 후 금일 매도한 종목에 대해서 매매이력 정리 업데이트(매도가, 손익률 등)
self.매도할종목 = []
self.매수할종목 = []
self.매수총액 = 0
self.holdcheck = False
for code in codes: # 구글 시트에서 import된 매수 모니터링 종목은 '매수할종목'에 추가
self.매수할종목.append(code)
# 포트폴리오에 있는 종목은 매도 관련 전략 재확인(구글시트) 및 '매도할종목'에 추가
if len(self.portfolio) > 0:
row_data = shortterm_sell_sheet.get_all_values()
idx_holding = row_data[0].index('보유일')
idx_strategy = row_data[0].index('매도전략')
idx_loss = row_data[0].index('손절가')
idx_sellprice = row_data[0].index('목표가')
for row in row_data[1:]:
code, name, market = get_code(row[1]) # 종목명으로 종목코드, 종목명, 시장 받아서(get_code 함수) 추가
if code in list(self.portfolio.keys()):
self.portfolio[code].보유일 = row[idx_holding]
self.portfolio[code].매도전략 = row[idx_strategy]
self.portfolio[code].매도가 = [] # 매도 전략 변경에 따라 매도가 초기화
# 매도구간별조건 = [손절가(%), 본전가(%), 구간3 고가대비(%), 구간4 고가대비(%), 구간5 고가대비(%), 구간6 고가대비(%)]
self.portfolio[code].매도구간별조건 = []
self.portfolio[code].매도구간별조건.append(round(((int(float(row[idx_loss].replace(',', ''))) / self.portfolio[code].매수가) - 1) * 100, 1)) # 손절가를 퍼센트로 변환하여 업데이트
for idx in range(1, len(self.Stocklist['전략']['매도구간별조건'])): # Stocklist의 매도구간별조건 전체를 바로 append할 경우 모든 종목이 동일한 값으로 들어감
self.portfolio[code].매도구간별조건.append(self.Stocklist['전략']['매도구간별조건'][idx])
if self.portfolio[code].매도전략 == '4': # 매도가 = [목표가(원), [손절가(%), 본전가(%), 1차익절가(%), 2차익절가(%)]]
self.portfolio[code].매도가.append(int(float(row[idx_sellprice].replace(',', ''))))
self.portfolio[code].매도가.append([])
for idx in range(len(self.Stocklist['전략']['전략매도가'])): # Stocklist의 전략매도가 전체를 바로 append할 경우 모든 종목이 동일한 값으로 들어감
self.portfolio[code].매도가[1].append(self.Stocklist['전략']['전략매도가'][idx])
self.portfolio[code].매도가[1][0] = self.portfolio[code].매도구간별조건[0] # float(row[idx_loss].replace('%', ''))
self.portfolio[code].sellcount = 0
self.portfolio[code].매도단위수량 = 0 # 전략4의 기본 매도 단위는 보유수량의 1/3
self.portfolio[code].익절가1도달 = False
self.portfolio[code].익절가2도달 = False
self.portfolio[code].목표가도달 = False
else:
if self.portfolio[code].매도전략 == '2' or self.portfolio[code].매도전략 == '3':
self.portfolio[code].목표도달 = False # 목표가(매도가) 도달 체크(False 상태로 구간 컷일경우 전량 매도)
self.portfolio[code].매도조건 = '' # 구간매도 : B, 목표매도 : T
for port_code in list(self.portfolio.keys()):
# 로봇 시작 시 포트폴리오 종목의 매도구간(전일 매도모니터링)을 1로 초기화
# 구간이 내려가는 건 반영하지 않으므로 초기화를 시켜서 다시 구간 체크 시작하기 위함
self.portfolio[port_code].매도구간 = 1 # 매도 구간은 로봇 실행 시 마다 초기화시킴
# 매수총액계산
self.매수총액 += (self.portfolio[port_code].매수가 * self.portfolio[port_code].수량)
# 포트폴리오에 있는 종목이 구글에서 받아서 만든 Stocklist에 없을 경우만 추가함
# 이 조건이 없을 경우 구글에서 받은 전략들이 아닌 과거 전략이 포트폴리오에서 넘어감
# 근데 포트폴리오에 있는 종목을 왜 Stocklist에 넣어야되는지 모르겠음(내가 하고도...)
if port_code not in list(self.Stocklist.keys()):
self.Stocklist[port_code] = {
'번호': self.portfolio[port_code].번호,
'종목명': self.portfolio[port_code].종목명,
'종목코드': self.portfolio[port_code].종목코드,
'시장': self.portfolio[port_code].시장,
'매수조건': self.portfolio[port_code].매수조건,
'매수가': self.portfolio[port_code].매수가,
'매도전략': self.portfolio[port_code].매도전략,
'매도가': self.portfolio[port_code].매도가
}
self.매도할종목.append(port_code)
# for stock in df_keeplist['종목번호'].values: # 보유 종목 체크해서 매도 종목에 추가 → 로봇이 두개 이상일 경우 중복되므로 미적용
# self.매도할종목.append(stock)
# 종목명 = df_keeplist[df_keeplist['종목번호']==stock]['종목명'].values[0]
# 매입가 = df_keeplist[df_keeplist['종목번호']==stock]['매입가'].values[0]
# 보유수량 = df_keeplist[df_keeplist['종목번호']==stock]['보유수량'].values[0]
# print('종목코드 : %s, 종목명 : %s, 매입가 : %s, 보유수량 : %s' %(stock, 종목명, 매입가, 보유수량))
# self.portfolio[stock] = CPortStock_ShortTerm(종목코드=stock, 종목명=종목명, 매수가=매입가, 수량=보유수량, 매수일='')
def 실시간데이터처리(self, param):
try:
if self.running == True:
체결시간 = '%s %s:%s:%s' % (str(self.d), param['체결시간'][0:2], param['체결시간'][2:4], param['체결시간'][4:])
종목코드 = param['종목코드']
현재가 = abs(int(float(param['현재가'])))
전일대비 = int(float(param['전일대비']))
등락률 = float(param['등락률'])
매도호가 = abs(int(float(param['매도호가'])))
매수호가 = abs(int(float(param['매수호가'])))
누적거래량 = abs(int(float(param['누적거래량'])))
시가 = abs(int(float(param['시가'])))
고가 = abs(int(float(param['고가'])))
저가 = abs(int(float(param['저가'])))
거래회전율 = abs(float(param['거래회전율']))
시가총액 = abs(int(float(param['시가총액'])))
종목명 = self.parent.CODE_POOL[종목코드][1] # pool[종목코드] = [시장구분, 종목명, 주식수, 전일종가, 시가총액]
전일종가 = self.parent.CODE_POOL[종목코드][3]
시세 = [현재가, 시가, 고가, 저가, 전일종가]
self.parent.statusbar.showMessage("[%s] %s %s %s %s" % (체결시간, 종목코드, 종목명, 현재가, 전일대비))
self.wr.writerow([체결시간, 종목코드, 종목명, 현재가, 전일대비])
# 매수 조건
# 매수모니터링 종료 시간 확인
if current_time < self.Stocklist['전략']['모니터링종료시간']:
if 종목코드 in self.매수할종목 and 종목코드 not in self.금일매도종목:
# 매수총액 + 종목단위투자금이 투자총액보다 작음 and 매수주문실행중Lock에 없음 -> 추가매수를 위해서 and 포트폴리오에 없음 조건 삭제
if (self.매수총액 + self.Stocklist[종목코드]['단위투자금'] < self.투자총액) and self.주문실행중_Lock.get(
'B_%s' % 종목코드) is None and len(
self.Stocklist[종목코드]['매수가']) > 0: # and self.portfolio.get(종목코드) is None
# 매수 전략별 모니터링 체크
buy_check, condition, qty = self.buy_strategy(종목코드, 시세)
if buy_check == True and (self.Stocklist[종목코드]['단위투자금'] // 현재가 > 0):
(result, order) = self.정량매수(sRQName='B_%s' % 종목코드, 종목코드=종목코드, 매수가=현재가, 수량=qty)
if result == True:
if self.portfolio.get(종목코드) is None: # 포트폴리오에 없으면 신규 저장
self.set_portfolio(종목코드, 현재가, condition)
self.주문실행중_Lock['B_%s' % 종목코드] = True
Telegram('[XTrader]매수주문 : 종목코드=%s, 종목명=%s, 매수가=%s, 매수조건=%s, 매수수량=%s' % (
종목코드, 종목명, 현재가, condition, qty))
logger.info('매수주문 : 종목코드=%s, 종목명=%s, 매수가=%s, 매수조건=%s, 매수수량=%s' % (
종목코드, 종목명, 현재가, condition, qty))
else:
Telegram('[XTrader]매수실패 : 종목코드=%s, 종목명=%s, 매수가=%s, 매수조건=%s' % (
종목코드, 종목명, 현재가, condition))
logger.info('매수실패 : 종목코드=%s, 종목명=%s, 매수가=%s, 매수조건=%s' % (종목코드, 종목명, 현재가, condition))
else:
if self.매수모니터링체크 == False:
for code in list(self.매수할종목): # iterate over a copy so remove() below does not skip items
if self.portfolio.get(code) is not None and code not in self.매도할종목:
Telegram('[XTrader]매수모니터링마감 : 종목코드=%s, 종목명=%s 매도모니터링 전환' % (code, self.Stocklist[code]['종목명']))
logger.info('매수모니터링마감 : 종목코드=%s, 종목명=%s 매도모니터링 전환' % (code, self.Stocklist[code]['종목명']))
self.매수할종목.remove(code)
self.매도할종목.append(code)
self.매수모니터링체크 = True
logger.info('매도할 종목 :%s' % self.매도할종목)
# 매도 조건
if 종목코드 in self.매도할종목:
# 포트폴리오에 있음 and 매도주문실행중Lock에 없음 and 매수주문실행중Lock에 없음
if self.portfolio.get(종목코드) is not None and self.주문실행중_Lock.get(
'S_%s' % 종목코드) is None: # and self.주문실행중_Lock.get('B_%s' % 종목코드) is None:
# 매도 전략별 모니터링 체크
매도방법, sell_check, ratio = self.sell_strategy(종목코드, 시세)
if sell_check == True:
if 매도방법 == '00':
(result, order) = self.정액매도(sRQName='S_%s' % 종목코드, 종목코드=종목코드, 매도가=현재가,
수량=round(self.portfolio[종목코드].수량 * ratio))
else:
(result, order) = self.정량매도(sRQName='S_%s' % 종목코드, 종목코드=종목코드, 매도가=현재가,
수량=round(self.portfolio[종목코드].수량 * ratio))
if result == True:
self.주문실행중_Lock['S_%s' % 종목코드] = True
Telegram('[XTrader]매도주문 : 종목코드=%s, 종목명=%s, 매도가=%s, 매도전략=%s, 매도구간=%s, 수량=%s' % (
종목코드, 종목명, 현재가, self.portfolio[종목코드].매도전략, self.portfolio[종목코드].매도구간,
int(self.portfolio[종목코드].수량 * ratio)))
if self.portfolio[종목코드].매도전략 == '2':
logger.info(
'매도주문 : 종목코드=%s, 종목명=%s, 매도가=%s, 매도전략=%s, 매도구간=%s, 목표도달=%s, 매도조건=%s, 수량=%s' % (
종목코드, 종목명, 현재가, self.portfolio[종목코드].매도전략, self.portfolio[종목코드].매도구간,
self.portfolio[종목코드].목표도달, self.portfolio[종목코드].매도조건,
int(self.portfolio[종목코드].수량 * ratio)))
else:
logger.info('매도주문 : 종목코드=%s, 종목명=%s, 매도가=%s, 매도전략=%s, 매도구간=%s, 수량=%s' % (
종목코드, 종목명, 현재가, self.portfolio[종목코드].매도전략, self.portfolio[종목코드].매도구간,
int(self.portfolio[종목코드].수량 * ratio)))
else:
Telegram('[XTrader]매도실패 : 종목코드=%s, 종목명=%s, 매도가=%s, 매도전략=%s, 매도구간=%s, 수량=%s' % (
종목코드, 종목명, 현재가, self.portfolio[종목코드].매도전략, self.portfolio[종목코드].매도구간,
self.portfolio[종목코드].수량 * ratio))
logger.info('매도실패 : 종목코드=%s, 종목명=%s, 매도가=%s, 매도전략=%s, 매도구간=%s, 수량=%s' % (
종목코드, 종목명, 현재가, self.portfolio[종목코드].매도전략, self.portfolio[종목코드].매도구간,
self.portfolio[종목코드].수량 * ratio))
except Exception as e:
# use the stock code from param here: 종목명 may not be assigned yet when the exception occurs
print('CTradeShortTerm_실시간데이터처리 Error : %s, %s' % (param.get('종목코드'), e))
Telegram('[XTrader]CTradeShortTerm_실시간데이터처리 Error : %s, %s' % (param.get('종목코드'), e), send='mc')
logger.error('CTradeShortTerm_실시간데이터처리 Error :%s, %s' % (param.get('종목코드'), e))
def 접수처리(self, param):
pass
def 체결처리(self, param):
종목코드 = param['종목코드']
주문번호 = param['주문번호']
self.주문결과[주문번호] = param
주문수량 = int(param['주문수량'])
미체결수량 = int(param['미체결수량'])
체결가 = int(0 if (param['체결가'] is None or param['체결가'] == '') else param['체결가']) # 매입가 동일
단위체결량 = int(0 if (param['단위체결량'] is None or param['단위체결량'] == '') else param['단위체결량'])
당일매매수수료 = int(0 if (param['당일매매수수료'] is None or param['당일매매수수료'] == '') else param['당일매매수수료'])
당일매매세금 = int(0 if (param['당일매매세금'] is None or param['당일매매세금'] == '') else param['당일매매세금'])
# 매수
if param['매도수구분'] == '2':
if self.주문번호_주문_매핑.get(주문번호) is not None:
주문 = self.주문번호_주문_매핑[주문번호]
매수가 = int(주문[2:])
# 단위체결가 = int(0 if (param['단위체결가'] is None or param['단위체결가'] == '') else param['단위체결가'])
# logger.debug('매수-------> %s %s %s %s %s' % (param['종목코드'], param['종목명'], 매수가, 주문수량 - 미체결수량, 미체결수량))
P = self.portfolio.get(종목코드)
if P is not None:
P.종목명 = param['종목명']
P.매수가 = 체결가 # 단위체결가
P.수량 += 단위체결량 # 추가 매수 대비해서 기존 수량에 체결된 수량 계속 더함(주문수량 - 미체결수량)
P.매수일 = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
else:
logger.error('ERROR 포트에 종목이 없음 !!!!')
if 미체결수량 == 0:
try:
self.주문실행중_Lock.pop(주문)
if self.Stocklist[종목코드]['매수주문완료'] >= self.Stocklist[종목코드]['매수가전략']:
self.매수할종목.remove(종목코드)
self.매도할종목.append(종목코드)
Telegram('[XTrader]분할 매수 완료_종목명:%s, 종목코드:%s 매수가:%s, 수량:%s' % (P.종목명, 종목코드, P.매수가, P.수량))
logger.info('분할 매수 완료_종목명:%s, 종목코드:%s 매수가:%s, 수량:%s' % (P.종목명, 종목코드, P.매수가, P.수량))
self.Stocklist[종목코드]['수량'] = P.수량
self.Stocklist[종목코드]['매수가'].pop(0)
self.매수총액 += (P.매수가 * P.수량)
logger.debug('체결처리완료_종목명:%s, 매수총액계산완료:%s' % (P.종목명, self.매수총액))
self.save_history(종목코드, status='매수')
Telegram('[XTrader]매수체결완료_종목명:%s, 매수가:%s, 수량:%s' % (P.종목명, P.매수가, P.수량))
logger.info('매수체결완료_종목명:%s, 매수가:%s, 수량:%s' % (P.종목명, P.매수가, P.수량))
except Exception as e:
Telegram('[XTrader]체결처리_매수 에러 종목명:%s, %s ' % (P.종목명, e), send='mc')
logger.error('체결처리_매수 에러 종목명:%s, %s ' % (P.종목명, e))
# 매도
if param['매도수구분'] == '1':
if self.주문번호_주문_매핑.get(주문번호) is not None:
주문 = self.주문번호_주문_매핑[주문번호]
매도가 = int(주문[2:])
try:
if 미체결수량 == 0:
self.주문실행중_Lock.pop(주문)
P = self.portfolio.get(종목코드)
if P is not None:
P.종목명 = param['종목명']
self.portfolio[종목코드].매도체결가 = 체결가
self.portfolio[종목코드].매도수량 = 주문수량
self.save_history(종목코드, status='매도')
Telegram('[XTrader]매도체결완료_종목명:%s, 체결가:%s, 수량:%s' % (param['종목명'], 체결가, 주문수량))
logger.info('매도체결완료_종목명:%s, 체결가:%s, 수량:%s' % (param['종목명'], 체결가, 주문수량))
except Exception as e:
Telegram('[XTrader]체결처리_매도 Error : %s' % e, send='mc')
logger.error('체결처리_매도 Error : %s' % e)
# 메인 화면에 반영
self.parent.RobotView()
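# 체결처리 handles real-time fill events: it updates the fill price/quantity on the portfolio entry
# and, once the outstanding quantity reaches 0, writes the trade history and releases the order lock.
# 잔고처리 below reconciles the broker's balance event and pops the stock from the portfolio when the
# held quantity drops to 0, recording it in 금일매도종목.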
def 잔고처리(self, param):
# print('CTradeShortTerm : 잔고처리')
종목코드 = param['종목코드']
P = self.portfolio.get(종목코드)
if P is not None:
P.매수가 = int(0 if (param['매입단가'] is None or param['매입단가'] == '') else param['매입단가'])
P.수량 = int(0 if (param['보유수량'] is None or param['보유수량'] == '') else param['보유수량'])
if P.수량 == 0:
self.portfolio.pop(종목코드)
self.매도할종목.remove(종목코드)
if 종목코드 not in self.금일매도종목: self.금일매도종목.append(종목코드)
logger.info('잔고처리_포트폴리오POP %s ' % 종목코드)
# 메인 화면에 반영
self.parent.RobotView()
def Run(self, flag=True, sAccount=None):
self.running = flag
ret = 0
# self.manual_portfolio()
for code in list(self.portfolio.keys()):
print(self.portfolio[code].__dict__)
logger.info(self.portfolio[code].__dict__)
if flag == True:
print("%s ROBOT 실행" % (self.sName))
try:
Telegram("[XTrader]%s ROBOT 실행" % (self.sName))
self.sAccount = sAccount
self.투자총액 = floor(int(d2deposit.replace(",", "")) * (self.Stocklist['전략']['투자금비중'] / 100))
print('로봇거래계좌 : ', 로봇거래계좌번호)
print('D+2 예수금 : ', int(d2deposit.replace(",", "")))
print('투자 총액 : ', self.투자총액)
print('Stocklist : ', self.Stocklist)
# self.최대포트수 = floor(int(d2deposit.replace(",", "")) / self.단위투자금 / len(self.parent.robots))
# print(self.최대포트수)
self.주문결과 = dict()
self.주문번호_주문_매핑 = dict()
self.주문실행중_Lock = dict()
codes = list(self.Stocklist.keys())
codes.remove('전략')
codes.remove('컬럼명')
self.초기조건(codes)
print("매도 : ", self.매도할종목)
print("매수 : ", self.매수할종목)
print("매수총액 : ", self.매수총액)
print("포트폴리오 매도모니터링 수정")
for code in list(self.portfolio.keys()):
print(self.portfolio[code].__dict__)
logger.info(self.portfolio[code].__dict__)
self.실시간종목리스트 = self.매도할종목 + self.매수할종목
logger.info("오늘 거래 종목 : %s %s" % (self.sName, ';'.join(self.실시간종목리스트) + ';'))
self.KiwoomConnect() # MainWindow 외에서 키움 API구동시켜서 자체적으로 API데이터송수신가능하도록 함
if len(self.실시간종목리스트) > 0:
self.f = open('data_result.csv', 'a', newline='')
self.wr = csv.writer(self.f)
self.wr.writerow(['체결시간', '종목코드', '종목명', '현재가', '전일대비'])
ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.실시간종목리스트) + ';')
logger.debug("실시간데이타요청 등록결과 %s" % ret)
except Exception as e:
print('CTradeShortTerm_Run Error :', e)
Telegram('[XTrader]CTradeShortTerm_Run Error : %s' % e, send='mc')
logger.error('CTradeShortTerm_Run Error : %s' % e)
else:
Telegram("[XTrader]%s ROBOT 실행 중지" % (self.sName))
print('Stocklist : ', self.Stocklist)
ret = self.KiwoomSetRealRemove(self.sScreenNo, 'ALL')
self.f.close()
del self.f
del self.wr
if self.portfolio is not None:
# 구글 매도모니터링 시트 기존 종목 삭제
num_data = shortterm_sell_sheet.get_all_values()
for i in range(len(num_data) - 1): # delete only the data rows below the header row
shortterm_sell_sheet.delete_rows(2)
for code in list(self.portfolio.keys()):
# 매수 미체결 종목 삭제
if self.portfolio[code].수량 == 0:
self.portfolio.pop(code)
else:
# 포트폴리오 종목은 구글 매도모니터링 시트에 추가하여 전략 수정가능
self.save_history(code, status='매도모니터링')
if len(self.금일매도종목) > 0:
try:
Telegram("[XTrader]%s 금일 매도 종목 손익 Upload : %s" % (self.sName, self.금일매도종목))
logger.info("%s 금일 매도 종목 손익 Upload : %s" % (self.sName, self.금일매도종목))
self.parent.statusbar.showMessage("금일 매도 종목 손익 Upload")
self.DailyProfit(self.금일매도종목)
except Exception as e:
print('%s 금일매도종목 결과 업로드 Error : %s' % (self.sName, e))
finally:
del self.DailyProfitLoop # 금일매도결과 업데이트 시 QEventLoop 사용으로 로봇 저장 시 pickcle 에러 발생하여 삭제시킴
self.KiwoomDisConnect() # 로봇 클래스 내에서 일별종목별실현손익 데이터를 받고나서 연결 해제시킴
# 메인 화면에 반영
self.parent.RobotView()
# 장기 투자용 : 현재 미리 선정한 종목에 대해서 로봇 시작과 동시에 매수 실행 적용
class CTradeLongTerm(CTrade): # 로봇 추가 시 __init__ : 복사, Setting, 초기조건:전략에 맞게, 데이터처리~Run:복사
def __init__(self, sName, UUID, kiwoom=None, parent=None):
self.sName = sName
self.UUID = UUID
self.sAccount = None
self.kiwoom = kiwoom
self.parent = parent
self.running = False
self.주문결과 = dict()
self.주문번호_주문_매핑 = dict()
self.주문실행중_Lock = dict()
self.portfolio = dict()
self.실시간종목리스트 = []
self.SmallScreenNumber = 9999
self.d = today
# RobotAdd 함수에서 초기화 다음 셋팅 실행해서 설정값 넘김
def Setting(self, sScreenNo, 매수방법='03', 매도방법='03', 종목리스트=[]):
self.sScreenNo = sScreenNo
self.실시간종목리스트 = []
self.매수방법 = 매수방법
self.매도방법 = 매도방법
# Robot_Run이 되면 실행됨 - 매수/매도 종목을 리스트로 저장
def 초기조건(self):
# 매수총액 계산하기
# 금일매도종목 리스트 변수 초기화
# 매도할종목 : 포트폴리오에 있던 종목 추가
# 매수할종목 : 구글에서 받은 종목 추가
self.parent.statusbar.showMessage("[%s] 초기조건준비" % (self.sName))
self.금일매도종목 = [] # 장 마감 후 금일 매도한 종목에 대해서 매매이력 정리 업데이트(매도가, 손익률 등)
self.매도할종목 = []
self.매수할종목 = []
self.Stocklist = dict()
df = pd.read_csv('매수종목.csv', encoding='euc-kr')
codes= df['종목'].to_list()
qtys = df['수량'].to_list()
for 종목코드, 수량 in zip(codes, qtys):
code, name, market = get_code(종목코드)
self.Stocklist[code] = {
'종목명' : name,
'종목코드' : code,
'시장구분' : market,
'매수수량' : 수량
}
self.매수할종목 = list(self.Stocklist.keys())
# 포트폴리오에 있는 종목은 매도 관련 전략 재확인(구글시트) 및 '매도할종목'에 추가
if len(self.portfolio) > 0:
for port_code in list(self.portfolio.keys()):
self.매도할종목.append(port_code)
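# Assumed layout of 매수종목.csv (euc-kr encoded, not shown here): one row per stock with a '종목'
# column holding a name or code accepted by get_code() and a '수량' column with the quantity to buy,
# for example:
# 종목,수량
# 삼성전자,10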
def 실시간데이터처리(self, param):
try:
if self.running == True:
체결시간 = '%s %s:%s:%s' % (str(self.d), param['체결시간'][0:2], param['체결시간'][2:4], param['체결시간'][4:])
종목코드 = param['종목코드']
현재가 = abs(int(float(param['현재가'])))
전일대비 = int(float(param['전일대비']))
등락률 = float(param['등락률'])
매도호가 = abs(int(float(param['매도호가'])))
매수호가 = abs(int(float(param['매수호가'])))
누적거래량 = abs(int(float(param['누적거래량'])))
시가 = abs(int(float(param['시가'])))
고가 = abs(int(float(param['고가'])))
저가 = abs(int(float(param['저가'])))
거래회전율 = abs(float(param['거래회전율']))
시가총액 = abs(int(float(param['시가총액'])))
종목명 = self.parent.CODE_POOL[종목코드][1] # pool[종목코드] = [시장구분, 종목명, 주식수, 전일종가, 시가총액]
시장구분 = self.parent.CODE_POOL[종목코드][0]
전일종가 = self.parent.CODE_POOL[종목코드][3]
시세 = [현재가, 시가, 고가, 저가, 전일종가]
self.parent.statusbar.showMessage("[%s] %s %s %s %s" % (체결시간, 종목코드, 종목명, 현재가, 전일대비))
# 매수 조건
# 매수모니터링 종료 시간 확인
if current_time >= "09:00:00":
if 종목코드 in self.매수할종목 and 종목코드 not in self.금일매도종목 and self.주문실행중_Lock.get('B_%s' % 종목코드) is None:
매수수량 = self.Stocklist[종목코드]['매수수량'] # use the per-stock quantity built in 초기조건 (self.수량 is never defined in this class)
(result, order) = self.정량매수(sRQName='B_%s' % 종목코드, 종목코드=종목코드, 매수가=현재가, 수량=매수수량)
if result == True:
self.portfolio[종목코드] = CPortStock_LongTerm(종목코드=종목코드, 종목명=종목명, 시장=시장구분, 매수가=현재가, 매수일=datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
self.주문실행중_Lock['B_%s' % 종목코드] = True
Telegram('[StockTrader]매수주문 : 종목코드=%s, 종목명=%s, 매수가=%s, 매수수량=%s' % (종목코드, 종목명, 현재가, 매수수량))
logger.info('매수주문 : 종목코드=%s, 종목명=%s, 매수가=%s, 매수수량=%s' % (종목코드, 종목명, 현재가, 매수수량))
else:
Telegram('[StockTrader]매수실패 : 종목코드=%s, 종목명=%s, 매수가=%s' % (종목코드, 종목명, 현재가))
logger.info('매수실패 : 종목코드=%s, 종목명=%s, 매수가=%s' % (종목코드, 종목명, 현재가))
# 매도 조건
if 종목코드 in self.매도할종목:
pass
except Exception as e:
# use the stock code from param here: 종목명 may not be assigned yet when the exception occurs
print('CTradeLongTerm_실시간데이터처리 Error : %s, %s' % (param.get('종목코드'), e))
Telegram('[StockTrader]CTradeLongTerm_실시간데이터처리 Error : %s, %s' % (param.get('종목코드'), e), send='mc')
logger.error('CTradeLongTerm_실시간데이터처리 Error :%s, %s' % (param.get('종목코드'), e))
def 접수처리(self, param):
pass
def 체결처리(self, param):
종목코드 = param['종목코드']
주문번호 = param['주문번호']
self.주문결과[주문번호] = param
주문수량 = int(param['주문수량'])
미체결수량 = int(param['미체결수량'])
체결가 = int(0 if (param['체결가'] is None or param['체결가'] == '') else param['체결가']) # 매입가 동일
단위체결량 = int(0 if (param['단위체결량'] is None or param['단위체결량'] == '') else param['단위체결량'])
당일매매수수료 = int(0 if (param['당일매매수수료'] is None or param['당일매매수수료'] == '') else param['당일매매수수료'])
당일매매세금 = int(0 if (param['당일매매세금'] is None or param['당일매매세금'] == '') else param['당일매매세금'])
# 매수
if param['매도수구분'] == '2':
if self.주문번호_주문_매핑.get(주문번호) is not None:
주문 = self.주문번호_주문_매핑[주문번호]
매수가 = int(주문[2:])
# 단위체결가 = int(0 if (param['단위체결가'] is None or param['단위체결가'] == '') else param['단위체결가'])
# logger.debug('매수-------> %s %s %s %s %s' % (param['종목코드'], param['종목명'], 매수가, 주문수량 - 미체결수량, 미체결수량))
P = self.portfolio.get(종목코드)
if P is not None:
P.종목명 = param['종목명']
P.매수가 = 체결가 # 단위체결가
P.수량 += 단위체결량 # 추가 매수 대비해서 기존 수량에 체결된 수량 계속 더함(주문수량 - 미체결수량)
P.매수일 = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
else:
logger.error('ERROR 포트에 종목이 없음 !!!!')
if 미체결수량 == 0:
try:
self.주문실행중_Lock.pop(주문)
self.매수할종목.remove(종목코드)
self.매도할종목.append(종목코드)
Telegram('[StockTrader]매수체결완료_종목명:%s, 매수가:%s, 수량:%s' % (P.종목명, P.매수가, P.수량))
logger.info('매수체결완료_종목명:%s, 매수가:%s, 수량:%s' % (P.종목명, P.매수가, P.수량))
except Exception as e:
Telegram('[XTrader]체결처리_매수 에러 종목명:%s, %s ' % (P.종목명, e), send='mc')
logger.error('체결처리_매수 에러 종목명:%s, %s ' % (P.종목명, e))
# 매도
if param['매도수구분'] == '1':
if self.주문번호_주문_매핑.get(주문번호) is not None:
주문 = self.주문번호_주문_매핑[주문번호]
매도가 = int(주문[2:])
try:
if 미체결수량 == 0:
self.주문실행중_Lock.pop(주문)
P = self.portfolio.get(종목코드)
if P is not None:
P.종목명 = param['종목명']
self.portfolio[종목코드].매도체결가 = 체결가
self.portfolio[종목코드].매도수량 = 주문수량
Telegram('[StockTrader]매도체결완료_종목명:%s, 체결가:%s, 수량:%s' % (param['종목명'], 체결가, 주문수량))
logger.info('매도체결완료_종목명:%s, 체결가:%s, 수량:%s' % (param['종목명'], 체결가, 주문수량))
except Exception as e:
Telegram('[StockTrader]체결처리_매도 Error : %s' % e, send='mc')
logger.error('체결처리_매도 Error : %s' % e)
# 메인 화면에 반영
self.parent.RobotView()
def 잔고처리(self, param):
# print('CTradeShortTerm : 잔고처리')
종목코드 = param['종목코드']
P = self.portfolio.get(종목코드)
if P is not None:
P.매수가 = int(0 if (param['매입단가'] is None or param['매입단가'] == '') else param['매입단가'])
P.수량 = int(0 if (param['보유수량'] is None or param['보유수량'] == '') else param['보유수량'])
if P.수량 == 0:
self.portfolio.pop(종목코드)
self.매도할종목.remove(종목코드)
if 종목코드 not in self.금일매도종목: self.금일매도종목.append(종목코드)
logger.info('잔고처리_포트폴리오POP %s ' % 종목코드)
# 메인 화면에 반영
self.parent.RobotView()
def Run(self, flag=True, sAccount=None):
self.running = flag
ret = 0
# self.manual_portfolio()
# for code in list(self.portfolio.keys()):
# print(self.portfolio[code].__dict__)
# logger.info(self.portfolio[code].__dict__)
if flag == True:
print("%s ROBOT 실행" % (self.sName))
try:
Telegram("[StockTrader]%s ROBOT 실행" % (self.sName))
self.sAccount = sAccount
self.투자총액 = floor(int(d2deposit.replace(",", "")) / len(self.parent.robots))
print('로봇거래계좌 : ', 로봇거래계좌번호)
print('D+2 예수금 : ', int(d2deposit.replace(",", "")))
print('투자 총액 : ', self.투자총액)
# self.최대포트수 = floor(int(d2deposit.replace(",", "")) / self.단위투자금 / len(self.parent.robots))
# print(self.최대포트수)
self.주문결과 = dict()
self.주문번호_주문_매핑 = dict()
self.주문실행중_Lock = dict()
self.초기조건()
print("매도 : ", self.매도할종목)
print("매수 : ", self.매수할종목)
self.실시간종목리스트 = self.매도할종목 + self.매수할종목
logger.info("오늘 거래 종목 : %s %s" % (self.sName, ';'.join(self.실시간종목리스트) + ';'))
self.KiwoomConnect() # MainWindow 외에서 키움 API구동시켜서 자체적으로 API데이터송수신가능하도록 함
if len(self.실시간종목리스트) > 0:
ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.실시간종목리스트) + ';')
logger.debug("[%s]실시간데이타요청 등록결과 %s" % (self.sName, ret))
except Exception as e:
print('CTradeShortTerm_Run Error :', e)
Telegram('[XTrader]CTradeShortTerm_Run Error : %s' % e, send='mc')
logger.error('CTradeShortTerm_Run Error : %s' % e)
else:
Telegram("[StockTrader]%s ROBOT 실행 중지" % (self.sName))
ret = self.KiwoomSetRealRemove(self.sScreenNo, 'ALL')
if self.portfolio is not None:
for code in list(self.portfolio.keys()):
# 매수 미체결 종목 삭제
if self.portfolio[code].수량 == 0:
self.portfolio.pop(code)
self.KiwoomDisConnect() # 로봇 클래스 내에서 일별종목별실현손익 데이터를 받고나서 연결 해제시킴
# 메인 화면에 반영
self.parent.RobotView()
Ui_TradeCondition, QtBaseClass_TradeCondition = uic.loadUiType("./UI/TradeCondition.ui")
class 화면_TradeCondition(QDialog, Ui_TradeCondition):
# def __init__(self, parent):
def __init__(self, sScreenNo, kiwoom=None, parent=None): #
super(화면_TradeCondition, self).__init__(parent)
# self.setAttribute(Qt.WA_DeleteOnClose) # 위젯이 닫힐때 내용 삭제하는 것으로 창이 닫힐때 정보를 저장해야되는 로봇 세팅 시에는 쓰면 에러남!!
self.setupUi(self)
# print("화면_TradeCondition : __init__")
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom #
self.parent = parent
self.progressBar.setValue(0) # Progressbar 초기 셋팅
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['종목코드', '종목명']
self.result = []
self.KiwoomConnect()
self.GetCondition()
# 매수 종목 선정을 위한 체크 함수
def pick_stock(self, data):
row = []
cnt = 0
for code in data['종목코드']:
url = 'https://finance.naver.com/item/sise.nhn?code=%s' % (code)
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
tag = soup.find_all("td", {"class": "num"})
# tag = soup.find_all("span")
result = []
temp = []
for i in tag:
temp.append(i.text.replace('\t', '').replace('\n', ''))
result.append(code) # 종목코드
result.append(int(temp[5].replace(',',''))) # 전일종가
# result.append(temp[7]) # 시가
# result.append(temp[11]) # 저가
# result.append(temp[9]) # 고가
result.append(int(temp[0].replace(',',''))) # 종가(현재가)
# result.append(temp[6]) # 거래량
row.append(result)
cnt+=1
# Progress Bar 디스플레이(전체 시간 대비 비율)
self.progressBar.setValue(int(cnt / len(data) * 100)) # setValue는 int 인자를 요구하므로 정수로 변환
df = pd.DataFrame(data=row, columns=['종목코드', '전일종가', '종가'])
df_final = pd.merge(data, df, on='종목코드')
df_final = df_final.reset_index(drop=True)
df_final['등락률'] = round((df_final['종가'] - df_final['전일종가'])/df_final['전일종가'] * 100, 1)
df_final = df_final[df_final['등락률'] >= 1][['종목코드', '종목명', '등락률']]
df_final = df_final.reset_index(drop=True)
print(df_final)
return df_final
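# 참고용 최소 스케치 : pick_stock의 등락률 필터 계산을 가상의 수치로 확인하는 예시(아래 값은 모두 가정값)
# 전일종가, 종가 = 20000, 20300
# 등락률 = round((종가 - 전일종가) / 전일종가 * 100, 1)
# print(등락률, 등락률 >= 1) # 1.5 True 예상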
# 저장된 조건 검색식 목록 읽음
def GetCondition(self):
# 1. 저장된 조건 검색식 목록 불러옴 GetCondition
# 2. 조건식 목록 요청 getConditionLoad
# 3. 목록 요청 응답 이벤트 OnReceiveConditionVer에서
# getConditionNameList로 목록을 딕셔너리로 self.condition에 받음
# 4. GetCondition에서 self.condition을 정리해서 콤보박스에 목록 추가함
try:
# print("화면_TradeCondition : GetCondition")
self.getConditionLoad()
self.df_condition = DataFrame()
self.idx = []
self.conName = []
for index in self.condition.keys(): # condition은 dictionary
# print(self.condition)
self.idx.append(str(index))
self.conName.append(self.condition[index])
# self.sendCondition("0156", self.condition[index], index, 1)
self.df_condition['Index'] = self.idx
self.df_condition['Name'] = self.conName
self.df_condition['Table'] = ">> 조건식 " + self.df_condition['Index'] + " : " + self.df_condition['Name']
self.df_condition['Index'] = self.df_condition['Index'].astype(int)
self.df_condition = self.df_condition.sort_values(by='Index').reset_index(drop=True) # 추가
print(self.df_condition) # 추가
self.comboBox_condition.clear()
self.comboBox_condition.addItems(self.df_condition['Table'].values)
except Exception as e:
print("GetCondition_Error")
print(e)
# 조건검색 해당 종목 요청 메서드
def sendCondition(self, screenNo, conditionName, conditionIndex, isRealTime):
# print("화면_TradeCondition : sendCondition")
"""
종목 조건검색 요청 메서드
이 메서드로 얻고자 하는 것은 해당 조건에 맞는 종목코드이다.
해당 종목에 대한 상세정보는 setRealReg() 메서드로 요청할 수 있다.
요청이 실패하는 경우는, 해당 조건식이 없거나, 조건명과 인덱스가 맞지 않거나, 조회 횟수를 초과하는 경우 발생한다.
조건검색에 대한 결과는
1회성 조회의 경우, receiveTrCondition() 이벤트로 결과값이 전달되며
실시간 조회의 경우, receiveTrCondition()과 receiveRealCondition() 이벤트로 결과값이 전달된다.
:param screenNo: string
:param conditionName: string - 조건식 이름
:param conditionIndex: int - 조건식 인덱스
:param isRealTime: int - 조건검색 조회구분(0: 1회성 조회, 1: 실시간 조회)
"""
isRequest = self.kiwoom.dynamicCall("SendCondition(QString, QString, int, int)",
screenNo, conditionName, conditionIndex, isRealTime)
# OnReceiveTrCondition() 이벤트 메서드에서 루프 종료
self.conditionLoop = QEventLoop()
self.conditionLoop.exec_()
# 조건 검색 관련 ActiveX와 On시리즈와 붙임(콜백)
def KiwoomConnect(self):
# print("화면_TradeCondition : KiwoomConnect")
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer)
self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition)
# 조건 검색 관련 ActiveX와 On시리즈 연결 해제
def KiwoomDisConnect(self):
# print("화면_TradeCondition : KiwoomDisConnect")
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer)
self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition)
# 조건식 목록 요청 메서드
def getConditionLoad(self):
""" 조건식 목록 요청 메서드 """
# print("화면_TradeCondition : getConditionLoad")
self.kiwoom.dynamicCall("GetConditionLoad()")
# OnReceiveConditionVer() 이벤트 메서드에서 루프 종료
self.conditionLoop = QEventLoop()
self.conditionLoop.exec_()
# 조건식 목록 획득 메서드(조건식 목록을 딕셔너리로 리턴)
def getConditionNameList(self):
"""
조건식 획득 메서드
조건식을 딕셔너리 형태로 반환합니다.
이 메서드는 반드시 receiveConditionVer() 이벤트 메서드안에서 사용해야 합니다.
:return: dict - {인덱스:조건명, 인덱스:조건명, ...}
"""
# print("화면_TradeCondition : getConditionNameList")
data = self.kiwoom.dynamicCall("GetConditionNameList()")
conditionList = data.split(';')
del conditionList[-1]
conditionDictionary = {}
for condition in conditionList:
key, value = condition.split('^')
conditionDictionary[int(key)] = value
return conditionDictionary
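# 참고용 최소 스케치 : GetConditionNameList()가 돌려주는 "인덱스^조건명;" 형식 문자열을 딕셔너리로 바꾸는 과정을
# 가상의 샘플 문자열로 재현한 예시(sample 값은 가정값)
# sample = "000^갭상승;001^거래량급증;"
# items = sample.split(';')
# del items[-1] # 마지막 빈 문자열 제거
# parsed = {int(k): v for k, v in (item.split('^') for item in items)}
# print(parsed) # {0: '갭상승', 1: '거래량급증'} 예상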
# 조건검색 세부 종목 조회 요청시 발생되는 이벤트
def OnReceiveTrCondition(self, sScrNo, strCodeList, strConditionName, nIndex, nNext):
logger.debug('main:OnReceiveTrCondition [%s] [%s] [%s] [%s] [%s]' % (sScrNo, strCodeList, strConditionName, nIndex, nNext))
# print("화면_TradeCondition : OnReceiveTrCondition")
"""
(1회성, 실시간) 종목 조건검색 요청시 발생되는 이벤트
:param screenNo: string
:param codes: string - 종목코드 목록(각 종목은 세미콜론으로 구분됨)
:param conditionName: string - 조건식 이름
:param conditionIndex: int - 조건식 인덱스
:param inquiry: int - 조회구분(0: 남은데이터 없음, 2: 남은데이터 있음)
"""
try:
if strCodeList == "":
return
self.codeList = strCodeList.split(';')
del self.codeList[-1]
# print("종목개수: ", len(self.codeList))
# print(self.codeList)
for code in self.codeList:
row = []
# code.append(c)
row.append(code)
n = self.kiwoom.dynamicCall("GetMasterCodeName(QString)", code)
# now = abs(int(self.kiwoom.dynamicCall("GetCommRealData(QString, int)", code, 10)))
# name.append(n)
row.append(n)
# row.append(now)
self.result.append(row)
# self.df_con['종목코드'] = code
# self.df_con['종목명'] = name
# print(self.df_con)
self.data = DataFrame(data=self.result, columns=self.columns)
self.data['종목코드'] = "'" + self.data['종목코드']
# self.data.to_csv('조건식_'+ self.condition_name + '_종목.csv', encoding='euc-kr', index=False)
# print(self.temp)
# 종목에 대한 주가 크롤링 후 최종 종목 선정
# self.data = self.pick_stock(self.data)
self.model.update(self.data)
# self.model.update(self.df_con)
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
except Exception as e:
print("OnReceiveTrCondition Error : ", e)
finally:
self.conditionLoop.exit()
# 조건식 목록 요청에 대한 응답 이벤트
def OnReceiveConditionVer(self, lRet, sMsg):
logger.debug('main:OnReceiveConditionVer : [이벤트] 조건식 저장 [%s] [%s]' % (lRet, sMsg))
# print("화면_TradeCondition : OnReceiveConditionVer")
"""
getConditionLoad() 메서드의 조건식 목록 요청에 대한 응답 이벤트
:param receive: int - 응답결과(1: 성공, 나머지 실패)
:param msg: string - 메세지
"""
try:
self.condition = self.getConditionNameList() # condition이 리턴되서 오면 GetCondition에서 condition 변수 사용 가능
# print("조건식 개수: ", len(self.condition))
# for key in self.condition.keys():
# print("조건식: ", key, ": ", self.condition[key])
except Exception as e:
print("OnReceiveConditionVer_Error")
finally:
self.conditionLoop.exit()
# print(self.conditionName)
# self.kiwoom.dynamicCall("SendCondition(QString,QString, int, int)", '0156', '갭상승', 0, 0)
# 실시간 종목 조건검색 요청시 발생되는 이벤트
def OnReceiveRealCondition(self, sTrCode, strType, strConditionName, strConditionIndex):
logger.debug('main:OnReceiveRealCondition [%s] [%s] [%s] [%s]' % (sTrCode, strType, strConditionName, strConditionIndex))
# print("화면_TradeCondition : OnReceiveRealCondition")
"""
실시간 종목 조건검색 요청시 발생되는 이벤트
:param code: string - 종목코드
:param event: string - 이벤트종류("I": 종목편입, "D": 종목이탈)
:param conditionName: string - 조건식 이름
:param conditionIndex: string - 조건식 인덱스(여기서만 인덱스가 string 타입으로 전달됨)
"""
print("[receiveRealCondition]")
print("종목코드: ", sTrCode)
print("이벤트: ", "종목편입" if strType == "I" else "종목이탈")
# 조건식 종목 검색 버튼 클릭 시 실행됨(시그널/슬롯 추가)
def inquiry(self):
# print("화면_TradeCondition : inquiry")
try:
self.result = []
index = int(self.df_condition['Index'][self.comboBox_condition.currentIndex()]) # currentIndex() : 현재 콤보박스에서 선택된 index를 받음 int형
self.condition_name = self.condition[index]
print(index, self.condition[index])
self.sendCondition("0156", self.condition[index], index, 0) # 1 : 실시간 조건검색식 종목 조회, 0 : 일회성 조회
except Exception as e:
print("조건 검색 Error: ", e)
class CTradeCondition(CTrade): # 로봇 추가 시 __init__ : 복사, Setting / 초기조건:전략에 맞게, 데이터처리 / Run:복사
def __init__(self, sName, UUID, kiwoom=None, parent=None):
# print("CTradeCondition : __init__")
self.sName = sName
self.UUID = UUID
self.sAccount = None
self.kiwoom = kiwoom
self.parent = parent
self.running = False
self.remained_data = True
self.초기설정상태 = False
self.주문결과 = dict()
self.주문번호_주문_매핑 = dict()
self.주문실행중_Lock = dict()
self.portfolio = dict()
self.CList = []
self.실시간종목리스트 = []
self.SmallScreenNumber = 9999
self.d = today
# 조건식 선택에 의해서 투자금, 매수/도 방법, 포트폴리오 수, 검색 종목 등이 저장됨
def Setting(self, sScreenNo, 포트폴리오수, 조건식인덱스, 조건식명, 조건검색타입, 단위투자금, 매수방법, 매도방법):
# print("CTradeCondition : Setting")
self.sScreenNo = sScreenNo
self.포트폴리오수 = 포트폴리오수
self.조건식인덱스 = 조건식인덱스
self.조건식명 = 조건식명
self.조건검색타입 = int(조건검색타입)
self.단위투자금 = 단위투자금
self.매수방법 = 매수방법
self.매도방법 = 매도방법
self.보유일 = 1
self.익절 = 5 # percent
self.고가대비 = -1 # percent
self.손절 = -2.7 # percent
self.투자금비중 = 70 # 예수금 대비 percent
print("조검검색 로봇 셋팅 완료 - 조건인덱스 : %s, 조건식명 : %s, 검색타입 : %s"%(self.조건식인덱스, self.조건식명, self.조건검색타입))
logger.info("조검검색 로봇 셋팅 완료 - 조건인덱스 : %s, 조건식명 : %s, 검색타입 : %s" % (self.조건식인덱스, self.조건식명, self.조건검색타입))
# Robot_Run이 되면 실행됨 - 매도 종목을 리스트로 저장
def 초기조건(self, codes):
# print("CTradeCondition : 초기조건")
self.parent.statusbar.showMessage("[%s] 초기조건준비" % (self.sName))
self.sell_band = [0, 3, 5, 10, 15, 25]
self.매도구간별조건 = [-2.7, 0.5, -2.0, -2.0, -2.0, -2.0]
self.매수모니터링 = True
self.clearcheck = False # 당일청산 체크변수
self.조건검색이벤트 = False
# 매수할 종목은 해당 조건에서 검색된 종목
# 매도할 종목은 이미 매수가 되어 포트폴리오에 저장되어 있는 종목
self.금일매도종목 = []
self.매도할종목 = []
self.매수할종목 = codes
# for code in codes: # 선택한 종목검색식의 종목은 '매수할종목'에 추가
# stock = self.portfolio.get(code) # 초기 로봇 실행 시 포트폴리오는 비어있음
# if stock != None: # 검색한 종목이 포트폴리오에 있으면 '매도할종목'에 추가
# self.매도할종목.append(code)
# else: # 포트폴리오에 없으면 매수종목리스트에 저장
# self.매수할종목.append(code)
for port_code in list(self.portfolio.keys()): # 포트폴리오에 있는 종목은 '매도할종목'에 추가
보유기간 = holdingcal(self.portfolio[port_code].매수일) - 1
if 보유기간 < 3:
self.portfolio[port_code].매도전략 = 5 # 매도지연 종목은 목표가 낮춤 5% -> 3% -> 1%
elif 보유기간 >= 3 and 보유기간 < 5:
self.portfolio[port_code].매도전략 = 3
elif 보유기간 >= 5: # 보유 5일 이상이면 목표가를 1%로 낮춤(5% -> 3% -> 1%)
self.portfolio[port_code].매도전략 = 1
print(self.portfolio[port_code].__dict__)
logger.info(self.portfolio[port_code].__dict__)
self.매도할종목.append(port_code)
# 수동 포트폴리오 생성
def manual_portfolio(self):
self.portfolio = dict()
self.Stocklist = {
'032190': {'종목명': '다우데이타', '종목코드': '032190', '매수가': [16150], '수량': 12, '보유일':1, '매수일': '2020/08/05 09:08:54'},
'047400': {'종목명': '유니온머티리얼', '종목코드': '047400', '매수가': [5350], '수량': 36, '보유일':1, '매수일': '2020/08/05 09:42:55'},
'085660': {'종목명': '차바이오텍', '종목코드': '085660', '매수가': [22100], '수량': 9, '보유일': 1, '매수일': '2020/08/05 09:08:54'},
'000020': {'종목명': '동화약품', '종목코드': '000020', '매수가': [25800], '수량': 7, '보유일': 1, '매수일': '2020/08/05 09:42:55'},
}
for code in list(self.Stocklist.keys()):
self.portfolio[code] = CPortStock(종목코드=code, 종목명=self.Stocklist[code]['종목명'],
매수가=self.Stocklist[code]['매수가'][0],
보유일=self.Stocklist[code]['보유일'],
수량=self.Stocklist[code]['수량'],
매수일=self.Stocklist[code]['매수일'])
# google spreadsheet 매매이력 생성
def save_history(self, code, status):
# 매매이력 sheet에 해당 종목(매수된 종목)이 있으면 row를 반환 아니면 예외처리 -> 신규 매수로 처리
try:
code_row = condition_history_sheet.findall(self.portfolio[code].종목명)[
-1].row # 종목명이 있는 모든 셀을 찾아서 맨 아래에 있는 셀을 선택
cell = alpha_list[condition_history_cols.index('매도가')] + str(code_row) # 매수 이력에 있는 종목이 매도가 되었는지 확인
sell_price = condition_history_sheet.acell(str(cell)).value
# 매도 이력은 추가 매도(매도전략5의 경우)나 신규 매도인 경우라 매도 이력 유무와 상관없음
if status == '매도': # 포트폴리오 데이터 사용
cell = alpha_list[condition_history_cols.index('매도가')] + str(code_row)
condition_history_sheet.update_acell(cell, self.portfolio[code].매도가)
cell = alpha_list[condition_history_cols.index('매도일')] + str(code_row)
condition_history_sheet.update_acell(cell, datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
계산수익률 = round((self.portfolio[code].매도가 / self.portfolio[code].매수가 - 1) * 100, 2)
cell = alpha_list[condition_history_cols.index('수익률(계산)')] + str(code_row) # 수익률 계산
condition_history_sheet.update_acell(cell, 계산수익률)
# 매수 이력은 있으나 매도 이력이 없음 -> 매도 전 추가 매수
if sell_price == '':
if status == '매수': # 포트폴리오 데이터 사용
cell = alpha_list[condition_history_cols.index('매수가')] + str(code_row)
condition_history_sheet.update_acell(cell, self.portfolio[code].매수가)
cell = alpha_list[condition_history_cols.index('매수일')] + str(code_row)
condition_history_sheet.update_acell(cell, self.portfolio[code].매수일)
else: # 매도가가 기록되어 거래가 완료된 종목으로 판단하여 예외발생으로 신규 매수 추가함
raise Exception('매매완료 종목')
except:
row = []
try:
if status == '매수':
row.append(self.portfolio[code].종목명)
row.append(self.portfolio[code].매수가)
row.append(self.portfolio[code].매수일)
condition_history_sheet.append_row(row)
except Exception as e:
print('[%s]save_history Error : %s' % (self.sName, e))
Telegram('[StockTrader][%s]save_history Error : %s' % (self.sName, e), send='mc')
logger.error('[%s]save_history Error : %s' % (self.sName, e))
# 매수 전략별 매수 조건 확인
def buy_strategy(self, code, price):
result = False
현재가, 시가, 고가, 저가, 전일종가 = price # 시세 = [현재가, 시가, 고가, 저가, 전일종가]
if self.단위투자금 // 현재가 > 0 and 현재가 >= 고가 * (0.99) and 저가 > 전일종가 and 현재가 < 시가 * 1.1 and 시가 <= 전일종가 * 1.05:
result = True
return result
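# 참고용 최소 스케치 : buy_strategy의 매수 조건을 가상의 시세로 검증해보는 예시(아래 숫자는 모두 가정값)
# 단위투자금, 현재가, 시가, 고가, 저가, 전일종가 = 1000000, 10450, 10000, 10500, 9900, 9800
# check = (단위투자금 // 현재가 > 0 # 최소 1주 이상 매수 가능한 금액
#          and 현재가 >= 고가 * 0.99 # 현재가가 당일 고가 부근
#          and 저가 > 전일종가 # 당일 저가가 전일 종가 위
#          and 현재가 < 시가 * 1.1 # 시가 대비 10% 미만 상승
#          and 시가 <= 전일종가 * 1.05) # 시가 갭 5% 이내
# print(check) # True 예상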
# 매도 구간 확인
def profit_band_check(self, 현재가, 매수가):
# print('현재가, 매수가', 현재가, 매수가)
ratio = round((현재가 - 매수가) / 매수가 * 100, 2)
# print('ratio', ratio)
if ratio < 3:
return 1
elif ratio in self.sell_band:
return self.sell_band.index(ratio) + 1
else:
self.sell_band.append(ratio)
self.sell_band.sort()
band = self.sell_band.index(ratio)
self.sell_band.remove(ratio)
return band
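# 참고용 최소 스케치 : profit_band_check가 수익률을 매도 구간 번호로 바꾸는 방식을 독립 함수로 재현한 예시
# (sell_band는 초기조건의 [0, 3, 5, 10, 15, 25]를 가정)
# sell_band = [0, 3, 5, 10, 15, 25]
# def band_of(ratio):
#     if ratio < 3:
#         return 1
#     elif ratio in sell_band:
#         return sell_band.index(ratio) + 1
#     sell_band.append(ratio)
#     sell_band.sort()
#     band = sell_band.index(ratio)
#     sell_band.remove(ratio)
#     return band
# print([band_of(r) for r in (1.2, 4.0, 7.5, 12.0, 30.0)]) # [1, 2, 3, 4, 6] 예상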
# 매도 전략
def sell_strategy(self, code, price):
result = False
band = self.portfolio[code].매도구간 # 이전 매도 구간 받음
현재가, 시가, 고가, 저가, 전일종가 = price # 시세 = [현재가, 시가, 고가, 저가, 전일종가]
매수가 = self.portfolio[code].매수가
sell_price = 현재가
# 매도를 위한 수익률 구간 체크(매수가 대비 현재가의 수익률 조건에 다른 구간 설정)
new_band = self.profit_band_check(현재가, 매수가)
if (hogacal(시가, 0, self.portfolio[code].시장, '상한가')) <= 현재가:
band = 7
if band < new_band: # 이전 구간보다 현재 구간이 높을 경우(시세가 올라간 경우)만
band = new_band # 구간을 현재 구간으로 변경(반대의 경우는 구간 유지)
# self.sell_band = [0, 3, 5, 10, 15, 25]
# self.매도구간별조건 = [-2.7, 0.3, -3.0, -4.0, -5.0, -7.0]
if band == 1 and 현재가 <= 매수가 * (1 + (self.매도구간별조건[0] / 100)):
result = False
elif band == 2 and 현재가 <= 매수가 * (1 + (self.매도구간별조건[1] / 100)): # 구간2 : 매수가 대비 매도구간별조건[1]% 수준까지 밀리면 매도
result = True
elif band == 3 and 현재가 <= 고가 * (1 + (self.매도구간별조건[2] / 100)): # 구간3 : 고가 대비 매도구간별조건[2]%만큼 밀리면 매도
result = True
elif band == 4 and 현재가 <= 고가 * (1 + (self.매도구간별조건[3] / 100)):
result = True
elif band == 5 and 현재가 <= 고가 * (1 + (self.매도구간별조건[4] / 100)):
result = True
elif band == 6 and 현재가 <= 고가 * (1 + (self.매도구간별조건[5] / 100)):
result = True
elif band == 7 and 현재가 >= (hogacal(시가, -3, self.portfolio[code].시장, '상한가')):
result = True
self.portfolio[code].매도구간 = band # 포트폴리오에 매도구간 업데이트
if current_time >= '15:10:00': # 15시 10분에 매도 처리
result = True
"""
if self.portfolio[code].매도전략변경1 == False and current_time >= '11:00:00' and current_time < '13:00:00':
self.portfolio[code].매도전략변경1 = True
self.portfolio[code].매도전략 = self.portfolio[code].매도전략 * 0.6
elif self.portfolio[code].매도전략변경2 == False and current_time >= '13:00:00':
self.portfolio[code].매도전략변경2 = True
self.portfolio[code].매도전략 = self.portfolio[code].매도전략 * 0.6
if self.portfolio[code].매도전략 < 0.3:
self.portfolio[code].매도전략 = 0.3
# 2. 익절 매도 전략
if 현재가 >= 매수가 * (1 + (self.portfolio[code].매도전략 / 100)):
result = True
sell_price = 현재가
# 3. 고가대비 비율 매도 전략
# elif 현재가 <= 고가 * (1 + (self.고가대비 / 100)):
# result = True
# sell_price = 현재가
# 4. 손절 매도 전략
# elif 현재가 <= 매수가 * (1 + (self.손절 / 100)):
# result = True
# sell_price = 현재가
"""
return result, sell_price
# 당일청산 전략
def clearning_strategy(self):
if self.clearcheck == True:
print('당일청산 매도')
try:
for code in list(self.portfolio.keys()):
if self.주문실행중_Lock.get('S_%s' % code) is None and self.portfolio[code].수량 != 0:
self.portfolio[code].매도구간 = 0
self.매도방법 = '03' # 03:시장가
(result, order) = self.정량매도(sRQName='S_%s' % code, 종목코드=code, 매도가=self.portfolio[code].매수가,
수량=self.portfolio[code].수량)
if result == True:
self.주문실행중_Lock['S_%s' % code] = True
Telegram('[StockTrader]정량매도(당일청산) : 종목코드=%s, 종목명=%s, 수량=%s' % (code, self.portfolio[code].종목명, self.portfolio[code].수량), send='mc')
logger.info('정량매도(당일청산) : 종목코드=%s, 종목명=%s, 수량=%s' % (code, self.portfolio[code].종목명, self.portfolio[code].수량))
else:
Telegram('[StockTrader]정량매도실패(당일청산) : 종목코드=%s, 종목명=%s, 수량=%s' % (code, self.portfolio[code].종목명, self.portfolio[code].수량), send='mc')
logger.info('정량매도실패(당일청산) : 종목코드=%s, 종목명=%s, 수량=%s' % (code, self.portfolio[code].종목명, self.portfolio[code].수량))
except Exception as e:
print("clearning_strategy Error :", e)
# 주문처리
def 실시간데이터처리(self, param):
if self.running == True:
체결시간 = '%s %s:%s:%s' % (str(self.d), param['체결시간'][0:2], param['체결시간'][2:4], param['체결시간'][4:])
종목코드 = param['종목코드']
현재가 = abs(int(float(param['현재가'])))
전일대비 = int(float(param['전일대비']))
등락률 = float(param['등락률'])
매도호가 = abs(int(float(param['매도호가'])))
매수호가 = abs(int(float(param['매수호가'])))
누적거래량 = abs(int(float(param['누적거래량'])))
시가 = abs(int(float(param['시가'])))
고가 = abs(int(float(param['고가'])))
저가 = abs(int(float(param['저가'])))
거래회전율 = abs(float(param['거래회전율']))
시가총액 = abs(int(float(param['시가총액'])))
전일종가 = 현재가 - 전일대비
# MainWindow의 __init__에서 CODE_POOL 변수 선언(self.CODE_POOL = self.get_code_pool()), pool[종목코드] = [시장구분, 종목명, 주식수, 전일종가, 시가총액]
종목명 = self.parent.CODE_POOL[종목코드][1] # pool[종목코드] = [시장구분, 종목명, 주식수, 전일종가, 시가총액]
시장구분 = self.parent.CODE_POOL[종목코드][0]
전일종가 = self.parent.CODE_POOL[종목코드][3]
시세 = [현재가, 시가, 고가, 저가, 전일종가]
self.parent.statusbar.showMessage("[%s] %s %s %s %s" % (체결시간, 종목코드, 종목명, 현재가, 전일대비))
# 정액매도 후 포트폴리오/매도할종목에서 제거
if 종목코드 in self.매도할종목:
if self.portfolio.get(종목코드) is not None and self.주문실행중_Lock.get('S_%s' % 종목코드) is None:
# 매도 전략별 모니터링 체크
sell_check, 매도가 = self.sell_strategy(종목코드, 시세)
if sell_check == True:
(result, order) = self.정액매도(sRQName='S_%s' % 종목코드, 종목코드=종목코드, 매도가=매도가, 수량=self.portfolio[종목코드].수량)
if result == True:
self.주문실행중_Lock['S_%s' % 종목코드] = True
if 종목코드 not in self.금일매도종목: self.금일매도종목.append(종목코드)
Telegram('[StockTrader]%s 매도주문 : 종목코드=%s, 종목명=%s, 매도구간=%s, 매도가=%s, 수량=%s' % (self.sName, 종목코드, 종목명, self.portfolio[종목코드].매도구간, 현재가, self.portfolio[종목코드].수량), send='mc')
logger.info('[StockTrader]%s 매도주문 : 종목코드=%s, 종목명=%s, 매도구간=%s, 매도가=%s, 수량=%s' % (self.sName, 종목코드, 종목명, self.portfolio[종목코드].매도구간, 현재가, self.portfolio[종목코드].수량))
else:
Telegram('[StockTrader]%s 매도실패 : 종목코드=%s, 종목명=%s, 매도가=%s, 수량=%s' % (self.sName, 종목코드, 종목명, 현재가, self.portfolio[종목코드].수량), send='mc')
logger.info('[StockTrader]%s 매도실패 : 종목코드=%s, 종목명=%s, 매도가=%s, 수량=%s' % (self.sName, 종목코드, 종목명, 현재가, self.portfolio[종목코드].수량))
# 매수할 종목에 대해서 정액매수 주문하고 포트폴리오/매도할종목에 추가, 매수할종목에서 제외
if current_time <= '14:30:00':
if 종목코드 in self.매수할종목 and 종목코드 not in self.금일매도종목:
if len(self.portfolio) < self.최대포트수 and self.portfolio.get(종목코드) is None and self.주문실행중_Lock.get('B_%s' % 종목코드) is None:
buy_check = self.buy_strategy(종목코드, 시세)
if buy_check == True:
(result, order) = self.정액매수(sRQName='B_%s' % 종목코드, 종목코드=종목코드, 매수가=현재가, 매수금액=self.단위투자금)
if result == True:
self.portfolio[종목코드] = CPortStock(종목코드=종목코드, 종목명=종목명, 시장=시장구분, 매수가=현재가, 보유일=self.보유일, 매도전략 = self.익절,
매수일=datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'))
self.주문실행중_Lock['B_%s' % 종목코드] = True
Telegram('[StockTrader]%s 매수주문 : 종목코드=%s, 종목명=%s, 매수가=%s' % (self.sName, 종목코드, 종목명, 현재가), send='mc')
logger.info('[StockTrader]%s 매수주문 : 종목코드=%s, 종목명=%s, 매수가=%s' % (self.sName, 종목코드, 종목명, 현재가))
else:
Telegram('[StockTrader]%s 매수실패 : 종목코드=%s, 종목명=%s, 매수가=%s' % (self.sName, 종목코드, 종목명, 현재가), send='mc')
logger.info('[StockTrader]%s 매수실패 : 종목코드=%s, 종목명=%s, 매수가=%s' % (self.sName, 종목코드, 종목명, 현재가))
else:
if self.매수모니터링 == True:
self.parent.ConditionTick.stop()
self.매수모니터링 = False
logger.info("매수모니터링 시간 초과")
def 접수처리(self, param):
pass
# OnReceiveChejanData에서 체결처리가 되면 체결처리 호출
def 체결처리(self, param):
종목코드 = param['종목코드']
주문번호 = param['주문번호']
self.주문결과[주문번호] = param
주문수량 = int(param['주문수량'])
미체결수량 = int(param['미체결수량'])
체결가 = int(0 if (param['체결가'] is None or param['체결가'] == '') else param['체결가']) # 매입가 동일
단위체결량 = int(0 if (param['단위체결량'] is None or param['단위체결량'] == '') else param['단위체결량'])
당일매매수수료 = int(0 if (param['당일매매수수료'] is None or param['당일매매수수료'] == '') else param['당일매매수수료'])
당일매매세금 = int(0 if (param['당일매매세금'] is None or param['당일매매세금'] == '') else param['당일매매세금'])
# 매수
if param['매도수구분'] == '2':
if self.주문번호_주문_매핑.get(주문번호) is not None:
주문 = self.주문번호_주문_매핑[주문번호]
매수가 = int(주문[2:])
P = self.portfolio.get(종목코드)
if P is not None:
P.종목명 = param['종목명']
P.매수가 = 체결가 # 단위체결가
P.수량 += 단위체결량 # 추가 매수 대비해서 기존 수량에 체결된 수량 계속 더함(주문수량 - 미체결수량)
P.매수일 = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
else:
logger.error('ERROR 포트에 종목이 없음 !!!!')
if 미체결수량 == 0:
try:
self.주문실행중_Lock.pop(주문)
self.매수할종목.remove(종목코드)
self.매도할종목.append(종목코드)
self.save_history(종목코드, status='매수')
Telegram('[StockTrader]%s 매수체결완료_종목명:%s, 매수가:%s, 수량:%s' % (self.sName, P.종목명, P.매수가, P.수량), send='mc')
logger.info('[StockTrader]%s %s 매수 완료 : 매수/주문%s Pop, 매도 Append ' % (self.sName, 종목코드, 주문))
except Exception as e:
Telegram('[StockTrader]%s 체결처리_매수 POP에러 종목명:%s ' % (self.sName, P.종목명), send='mc')
logger.error('[StockTrader]%s 체결처리_매수 POP에러 종목명:%s ' % (self.sName, P.종목명))
# 매도
if param['매도수구분'] == '1':
if self.주문번호_주문_매핑.get(주문번호) is not None:
주문 = self.주문번호_주문_매핑[주문번호]
매도가 = int(주문[2:])
try:
if 미체결수량 == 0:
self.주문실행중_Lock.pop(주문)
P = self.portfolio.get(종목코드)
if P is not None:
P.종목명 = param['종목명']
self.portfolio[종목코드].매도가 = 체결가
self.save_history(종목코드, status='매도')
Telegram('[StockTrader]%s 매도체결완료_종목명:%s, 체결가:%s, 수량:%s' % (self.sName, param['종목명'], 체결가, 주문수량), send='mc')
logger.info('[StockTrader]%s 매도체결완료_종목명:%s, 체결가:%s, 수량:%s' % (self.sName, param['종목명'], 체결가, 주문수량))
except Exception as e:
Telegram('[StockTrader]%s 체결처리_매도 매매이력 Error : %s' % (self.sName, e), send='mc')
logger.error('[StockTrader]%s 체결처리_매도 매매이력 Error : %s' % (self.sName, e))
# 메인 화면에 반영
self.parent.RobotView()
def 잔고처리(self, param):
종목코드 = param['종목코드']
P = self.portfolio.get(종목코드)
if P is not None:
P.매수가 = int(0 if (param['매입단가'] is None or param['매입단가'] == '') else param['매입단가'])
P.수량 = int(0 if (param['보유수량'] is None or param['보유수량'] == '') else param['보유수량'])
if P.수량 == 0:
self.portfolio.pop(종목코드)
self.매도할종목.remove(종목코드)
if 종목코드 not in self.금일매도종목: self.금일매도종목.append(종목코드)
logger.info('잔고처리_포트폴리오POP %s ' % 종목코드)
# 메인 화면에 반영
self.parent.RobotView()
# MainWindow의 ConditionTick에 의해서 3분마다 실행
def ConditionCheck(self):
if '3' in self.sName:
if current_time >= "15:00:00" and self.조건검색이벤트 == False:
self.조건검색이벤트 = True
codes = self.GetCodes(self.조건식인덱스, self.조건식명, self.조건검색타입)
print(current_time, codes)
code_list=[]
for code in codes:
code_list.append(code + '_' + self.parent.CODE_POOL[code][1] + '\n')
code_list = "".join(code_list)
print(current_time, code_list)
Telegram(code_list, send='mc')
else:
pass
else:
codes = self.GetCodes(self.조건식인덱스, self.조건식명, self.조건검색타입)
print(current_time, codes)
for code in codes:
if code not in self.매수할종목 and self.portfolio.get(code) is None and code not in self.금일매도종목:
print('매수종목추가 : ', code, self.parent.CODE_POOL[code][1])
self.매수할종목.append(code)
self.실시간종목리스트.append(code)
ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.실시간종목리스트) + ';') # 실시간 시세조회 종목 추가
logger.debug("[%s]실시간데이타요청 등록결과 %s %s" % (self.sName, self.실시간종목리스트, ret))
# 실시간 조검 검색 편입 종목 처리
def 실시간조건처리(self, code):
if (code not in self.매수할종목) and (self.portfolio.get(code) is None) and (code not in self.금일매도종목):
print('매수종목추가 : ', code)
self.매수할종목.append(code)
self.실시간종목리스트.append(code)
ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.실시간종목리스트) + ';') # 실시간 시세조회 종목 추가
logger.debug("[%s]실시간데이타요청 등록결과 %s %s" % (self.sName, self.실시간종목리스트, ret))
def Run(self, flag=True, sAccount=None):
self.running = flag
ret = 0
codes = []
self.codeList = []
# self.manual_portfolio()
if flag == True:
print("%s ROBOT 실행" % (self.sName))
self.KiwoomConnect()
try:
logger.info("[%s]조건식 거래 로봇 실행"%(self.sName))
self.sAccount = Account
self.주문결과 = dict()
self.주문번호_주문_매핑 = dict()
self.주문실행중_Lock = dict()
self.투자총액 = floor(int(d2deposit.replace(",", "")) * (self.투자금비중 / 100))
print('D+2 예수금 : ', int(d2deposit.replace(",", "")))
print('투자금 : ', self.투자총액)
print('단위투자금 : ', self.단위투자금)
self.최대포트수 = self.포트폴리오수 # floor(self.투자총액 / self.단위투자금) + len(self.portfolio)
# print('기존포트수 : ', len(self.portfolio))
print('최대포트수 : ', self.최대포트수)
print("조건식 인덱스 : ", self.조건식인덱스, type(self.조건식인덱스))
print("조건식명 : ", self.조건식명)
if self.조건검색타입 == 0: # 3분봉 검색
self.parent.ConditionTick.start(1000)
else: # 실시간 검색
print('실시간 조건검색')
codes = self.GetCodes(self.조건식인덱스, self.조건식명, self.조건검색타입)
codes = []
self.초기조건(codes)
print("매수 : ", self.매수할종목)
print("매도 : ", self.매도할종목)
self.실시간종목리스트 = self.매도할종목 + self.매수할종목
logger.info("[%s]오늘 거래 종목 : %s" % (self.sName, ';'.join(self.실시간종목리스트) + ';'))
if len(self.실시간종목리스트) > 0:
ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.실시간종목리스트) + ';') # 실시간 시세조회 등록
logger.debug("실시간데이타요청 등록결과 %s" % ret)
except Exception as e:
print('[%s]_Run Error : %s' % (self.sName,e))
Telegram('[StockTrader][%s]_Run Error : %s' % (self.sName,e), send='mc')
logger.error('[StockTrader][%s]_Run Error : %s' % (self.sName,e))
else:
if self.조건검색타입 == 0:
self.parent.ConditionTick.stop() # MainWindow 타이머 중지
else:
ret = self.sendConditionStop("0156", self.조건식명, self.조건식인덱스) # 실시간 조검 검색 중지
ret = self.KiwoomSetRealRemove(self.sScreenNo, 'ALL')
if self.portfolio is not None:
for code in list(self.portfolio.keys()):
if self.portfolio[code].수량 == 0:
self.portfolio.pop(code)
if len(self.금일매도종목) > 0:
try:
Telegram("[StockTrader]%s 금일 매도 종목 손익 Upload : %s" % (self.sName, self.금일매도종목), send='mc')
logger.info("[%s]금일 매도 종목 손익 Upload : %s" % (self.sName, self.금일매도종목))
self.parent.statusbar.showMessage("금일 매도 종목 손익 Upload")
self.DailyProfit(self.금일매도종목)
except Exception as e:
print('%s 금일매도종목 결과 업로드 Error : %s' %(self.sName, e))
finally:
del self.DailyProfitLoop # 금일매도결과 업데이트 시 QEventLoop 사용으로 로봇 저장 시 pickcle 에러 발생하여 삭제시킴
del self.ConditionLoop
self.KiwoomDisConnect() # 로봇 클래스 내에서 일별종목별실현손익 데이터를 받고나서 연결 해제시킴
# 메인 화면에 반영
self.parent.RobotView()
class 화면_ConditionMonitoring(QDialog, Ui_TradeCondition):
def __init__(self, sScreenNo, kiwoom=None, parent=None): #
super(화면_ConditionMonitoring, self).__init__(parent)
# self.setAttribute(Qt.WA_DeleteOnClose) # 위젯이 닫힐때 내용 삭제하는 것으로 창이 닫힐때 정보를 저장해야되는 로봇 세팅 시에는 쓰면 에러남!!
self.setupUi(self)
self.setWindowTitle("ConditionMonitoring")
self.lineEdit_name.setText('ConditionMonitoring')
self.progressBar.setValue(0) # Progressbar 초기 셋팅
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom #
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['종목코드', '종목명', '조건식']
self.result = []
self.KiwoomConnect()
self.GetCondition()
# 저장된 조건 검색식 목록 읽음
def GetCondition(self):
try:
self.getConditionLoad()
self.df_condition = DataFrame()
self.idx = []
self.conName = []
for index in self.condition.keys(): # condition은 dictionary
# print(self.condition)
self.idx.append(str(index))
self.conName.append(self.condition[index])
# self.sendCondition("0156", self.condition[index], index, 1)
self.df_condition['Index'] = self.idx
self.df_condition['Name'] = self.conName
self.df_condition['Table'] = ">> 조건식 " + self.df_condition['Index'] + " : " + self.df_condition['Name']
self.df_condition['Index'] = self.df_condition['Index'].astype(int)
self.df_condition = self.df_condition.sort_values(by='Index').reset_index(drop=True) # 추가
print(self.df_condition) # 추가
self.comboBox_condition.clear()
self.comboBox_condition.addItems(self.df_condition['Table'].values)
except Exception as e:
print("GetCondition_Error")
print(e)
# 조건검색 해당 종목 요청 메서드
def sendCondition(self, screenNo, conditionName, conditionIndex, isRealTime):
isRequest = self.kiwoom.dynamicCall("SendCondition(QString, QString, int, int",
screenNo, conditionName, conditionIndex, isRealTime)
# OnReceiveTrCondition() 이벤트 메서드에서 루프 종료
self.conditionLoop = QEventLoop()
self.conditionLoop.exec_()
# 조건 검색 관련 ActiveX와 On시리즈와 붙임(콜백)
def KiwoomConnect(self):
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer)
self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition)
# 조건 검색 관련 ActiveX와 On시리즈 연결 해제
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer)
self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition)
# 조건식 목록 요청 메서드
def getConditionLoad(self):
self.kiwoom.dynamicCall("GetConditionLoad()")
# OnReceiveConditionVer() 이벤트 메서드에서 루프 종료
self.conditionLoop = QEventLoop()
self.conditionLoop.exec_()
# 조건식 목록 획득 메서드(조건식 목록을 딕셔너리로 리턴)
def getConditionNameList(self):
data = self.kiwoom.dynamicCall("GetConditionNameList()")
conditionList = data.split(';')
del conditionList[-1]
conditionDictionary = {}
for condition in conditionList:
key, value = condition.split('^')
conditionDictionary[int(key)] = value
return conditionDictionary
# 조건검색 세부 종목 조회 요청시 발생되는 이벤트
def OnReceiveTrCondition(self, sScrNo, strCodeList, strConditionName, nIndex, nNext):
logger.debug('main:OnReceiveTrCondition [%s] [%s] [%s] [%s] [%s]' % (
sScrNo, strCodeList, strConditionName, nIndex, nNext))
try:
if strCodeList == "":
return
self.codeList = strCodeList.split(';')
del self.codeList[-1]
# print("종목개수: ", len(self.codeList))
# print(self.codeList)
for code in self.codeList:
row = []
# code.append(c)
row.append(code)
n = self.kiwoom.dynamicCall("GetMasterCodeName(QString)", code)
# now = abs(int(self.kiwoom.dynamicCall("GetCommRealData(QString, int)", code, 10)))
# name.append(n)
row.append(n)
row.append(strConditionName)
self.result.append(row)
# self.df_con['종목코드'] = code
# self.df_con['종목명'] = name
# print(self.df_con)
self.data = DataFrame(data=self.result, columns=self.columns)
self.data['종목코드'] = "'" + self.data['종목코드']
self.data = self.data.sort_values(by=['조건식', '종목명'])
self.data = self.data.drop_duplicates(['종목명', '조건식'], keep='first').reset_index(drop=True)
print(self.data)
self.model.update(self.data)
# self.model.update(self.df_con)
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
finally:
time.sleep(2)
self.conditionLoop.exit()
# 조건식 목록 요청에 대한 응답 이벤트
def OnReceiveConditionVer(self, lRet, sMsg):
logger.debug('main:OnReceiveConditionVer : [이벤트] 조건식 저장 [%s] [%s]' % (lRet, sMsg))
try:
self.condition = self.getConditionNameList() # condition이 리턴되서 오면 GetCondition에서 condition 변수 사용 가능
# print("조건식 개수: ", len(self.condition))
# for key in self.condition.keys():
# print("조건식: ", key, ": ", self.condition[key])
except Exception as e:
print("OnReceiveConditionVer_Error")
finally:
self.conditionLoop.exit()
# print(self.conditionName)
# self.kiwoom.dynamicCall("SendCondition(QString,QString, int, int)", '0156', '갭상승', 0, 0)
# 실시간 종목 조건검색 요청시 발생되는 이벤트
def OnReceiveRealCondition(self, sTrCode, strType, strConditionName, strConditionIndex):
logger.debug(
'main:OnReceiveRealCondition [%s] [%s] [%s] [%s]' % (sTrCode, strType, strConditionName, strConditionIndex))
print("종목코드: ", sTrCode)
print("이벤트: ", "종목편입" if strType == "I" else "종목이탈")
# 조건식 종목 검색 버튼 클릭 시 실행됨(시그널/슬롯 추가)
def inquiry(self):
self.result = []
cnt=0
print('조건식 갯수 :', len(self.df_condition))
for idx in range(len(self.df_condition)):
print(idx, self.condition[idx])
self.sendCondition("0156", self.condition[idx], idx, 0)
cnt += 1
# Progress Bar 디스플레이(전체 시간 대비 비율)
self.progressBar.setValue(int(cnt / len(self.df_condition) * 100)) # setValue는 int 인자를 요구하므로 정수로 변환
print('조건식 종목 조회 완료')
self.parent.statusbar.showMessage("조건식 종목 조회 완료")
# 원하는 종목/주가 설정 후 알림
class CPriceMonitoring(CTrade): # 로봇 추가 시 __init__ : 복사, Setting, 초기조건:전략에 맞게, 데이터처리~Run:복사
def __init__(self, sName, UUID, kiwoom=None, parent=None):
self.sName = sName
self.UUID = UUID
self.sAccount = None
self.kiwoom = kiwoom
self.parent = parent
self.running = False
self.주문결과 = dict()
self.주문번호_주문_매핑 = dict()
self.주문실행중_Lock = dict()
self.portfolio = dict()
self.실시간종목리스트 = []
self.SmallScreenNumber = 9999
self.d = today
# RobotAdd 함수에서 초기화 다음 셋팅 실행해서 설정값 넘김
def Setting(self, sScreenNo):
self.sScreenNo = sScreenNo
# 수동 포트폴리오 생성
def manual_portfolio(self):
self.portfolio = dict()
self.Stocklist = {
'005935': {'종목명': '삼성전자우', '종목코드': '005935', '시장': 'KOSPI', '매수가': 50600,
'수량': 10, '매수일': '2020/09/24 09:00:00'},
'092130': {'종목명': '이크레더블', '종목코드': '092130', '시장': 'KOSDAQ', '매수가': 24019,
'수량': 21, '매수일': '2020/11/04 09:00:00'},
'271560': {'종목명': '오리온', '종목코드': '271560', '시장': 'KOSPI', '매수가': 132000,
'수량': 10, '매수일': '2020/10/08 09:00:00'},
}
for code in list(self.Stocklist.keys()):
self.portfolio[code] = CPortStock_LongTerm(종목코드=code,
종목명=self.Stocklist[code]['종목명'],
시장=self.Stocklist[code]['시장'],
매수가=self.Stocklist[code]['매수가'],
수량=self.Stocklist[code]['수량'],
매수일=self.Stocklist[code]['매수일'])
# Robot_Run이 되면 실행됨 - 매수/매도 종목을 리스트로 저장
def 초기조건(self):
self.parent.statusbar.showMessage("[%s] 초기조건준비" % (self.sName))
row_data = price_monitoring_sheet.get_all_values()
self.stocklist = {}
self.Data_save = False
for row in row_data[1:]:
temp = []
try:
code, name, market = get_code(row[0]) # 종목명으로 종목코드, 종목명, 시장 받아서(get_code 함수) 추가
except Exception as e:
name = ''
code = ''
market = ''
print('구글 매수모니터링 시트 종목명 오류 : %s' % (row[0]))
logger.error('구글 매수모니터링 시트 오류 : %s' % (row[0]))
Telegram('[StockTrader]구글 매수모니터링 시트 오류 : %s' % (row[0]))
for idx in range(1, len(row)):
if row[idx] != '':
temp.append(int(row[idx]))
self.stocklist[code] = {
'종목명': name,
'종목코드': code,
'모니터링주가': temp
}
print(self.stocklist)
self.모니터링종목 = list(self.stocklist.keys())
try:
self.df_codes = pd.DataFrame()
cnt = 0
for code in self.모니터링종목:
temp = fdr.DataReader(code)
temp = temp[-70:][['Open', 'High', 'Low', 'Close', 'Volume']]
temp.reset_index(inplace=True)
temp['Date'] = temp['Date'].astype(str)
temp['Code'] = code
if cnt == 0:
self.df_codes = temp.copy()
else:
self.df_codes = pd.concat([self.df_codes, temp])
self.df_codes.reset_index(drop=True, inplace=True)
cnt += 1
except Exception as e:
print('CPriceMonitoring_초기조건 오류 : %s' % (e))
logger.error('CPriceMonitoring_초기조건 오류 : %s' % (e))
Telegram('[StockTrader]CPriceMonitoring_초기조건 오류 : %s' % (e))
# 이동평균가 위치 확인
def MA_Check(self, data):
if data['MA5'] < data['MA20']:
return True
else:
return False
# 이동평균을 이용한 매수 전략 신호 발생
def MA_Strategy(self, name, code, price):
today = datetime.datetime.today().strftime("%Y-%m-%d")
현재가, 시가, 고가, 저가, 거래량 = price
try:
df = self.df_codes.loc[self.df_codes['Code'] == code]
df.reset_index(drop=True, inplace=True)
df.loc[len(df)] = [today, 시가, 고가, 저가, 현재가, 거래량, code]  # ['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Code']
df['MA5'] = df['Close'].rolling(window=5).mean()
df['MA20'] = df['Close'].rolling(window=20).mean()
df['MA_Check'] = df.apply(self.MA_Check, axis=1)
if self.Data_save==False and current_time >= '15:19:00':
self.Data_save = True
self.df_codes.to_csv('PriceData.csv', encoding='euc-kr', index=False)
if df.iloc[-2]['MA_Check'] == True and df.iloc[-1]['MA_Check'] == False:
Telegram('[StockTrader]%s 매수 신호 발생\n현재가 : %s, 시가 : %s, 고가 : %s, 저가 : %s' % (name, 현재가, 시가, 고가, 저가))
logger.info('[StockTrader]%s 매수 신호 발생\n현재가 : %s, 시가 : %s, 고가 : %s, 저가 : %s' % (name, 현재가, 시가, 고가, 저가))
except Exception as e:
print('CPriceMonitoring_MA_Strategy 오류 : %s' % (e))
logger.error('CPriceMonitoring_MA_Strategy 오류 : %s' % (e))
Telegram('[StockTrader]CPriceMonitoring_MA_Strategy 오류 : %s' % (e))
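# 참고용 최소 스케치 : MA_Strategy가 쓰는 MA5/MA20 교차(직전 봉은 MA5<MA20, 현재 봉은 MA5>=MA20) 감지를
# 독립적으로 재현한 예시(종가 시계열은 임의의 가정값)
# import pandas as pd
# close = list(range(140, 115, -1)) + list(range(115, 131)) # 하락 후 반등하는 가상의 종가
# df = pd.DataFrame({'Close': close})
# df['MA5'] = df['Close'].rolling(window=5).mean()
# df['MA20'] = df['Close'].rolling(window=20).mean()
# df['MA_Check'] = df['MA5'] < df['MA20'] # True : 5일선이 20일선 아래
# cross = df[(df['MA_Check'].shift(1) == True) & (df['MA_Check'] == False)]
# print(cross.index.tolist()) # 5일선이 20일선을 상향 돌파한 봉의 위치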
def 실시간데이터처리(self, param):
try:
if self.running == True:
체결시간 = '%s %s:%s:%s' % (str(self.d), param['체결시간'][0:2], param['체결시간'][2:4], param['체결시간'][4:])
종목코드 = param['종목코드']
현재가 = abs(int(float(param['현재가'])))
전일대비 = int(float(param['전일대비']))
등락률 = float(param['등락률'])
매도호가 = abs(int(float(param['매도호가'])))
매수호가 = abs(int(float(param['매수호가'])))
누적거래량 = abs(int(float(param['누적거래량'])))
시가 = abs(int(float(param['시가'])))
고가 = abs(int(float(param['고가'])))
저가 = abs(int(float(param['저가'])))
거래회전율 = abs(float(param['거래회전율']))
시가총액 = abs(int(float(param['시가총액'])))
종목명 = self.parent.CODE_POOL[종목코드][1] # pool[종목코드] = [시장구분, 종목명, 주식수, 전일종가, 시가총액]
시장구분 = self.parent.CODE_POOL[종목코드][0]
전일종가 = self.parent.CODE_POOL[종목코드][3]
시세 = [현재가, 시가, 고가, 저가, 누적거래량]
self.parent.statusbar.showMessage("[%s] %s %s %s %s" % (체결시간, 종목코드, 종목명, 현재가, 전일대비))
# print("[%s] %s %s %s %s" % (체결시간, 종목코드, 종목명, 현재가, 전일대비))
if len(self.stocklist[종목코드]['모니터링주가']) > 0:
if 현재가 in self.stocklist[종목코드]['모니터링주가']:
Telegram('[StockTrader]%s 주가도달 알림\n현재가 : %s, 시가 : %s, 고가 : %s, 저가 : %s' % (종목명, 현재가, 시가, 고가, 저가))
self.stocklist[종목코드]['모니터링주가'].remove(현재가)
self.MA_Strategy(종목명, 종목코드, 시세)
except Exception as e:
print('CPriceMonitoring_실시간데이터처리 Error : %s, %s' % (종목명, e))
Telegram('[StockTrader]CPriceMonitoring_실시간데이터처리 Error : %s, %s' % (종목명, e), send='mc')
logger.error('CPriceMonitoring_실시간데이터처리 Error : %s, %s' % (종목명, e))
def 접수처리(self, param):
pass
def 체결처리(self, param):
pass
def 잔고처리(self, param):
pass
def Run(self, flag=True, sAccount=None):
self.running = flag
ret = 0
# self.manual_portfolio()
if flag == True:
print("%s ROBOT 실행" % (self.sName))
try:
Telegram("[StockTrader]%s ROBOT 실행" % (self.sName))
self.초기조건()
print('초기조건 설정 완료')
self.실시간종목리스트 = self.모니터링종목
logger.info("오늘 거래 종목 : %s %s" % (self.sName, ';'.join(self.실시간종목리스트) + ';'))
self.KiwoomConnect() # MainWindow 외에서 키움 API구동시켜서 자체적으로 API데이터송수신가능하도록 함
if len(self.실시간종목리스트) > 0:
ret = self.KiwoomSetRealReg(self.sScreenNo, ';'.join(self.실시간종목리스트) + ';')
logger.debug("[%s]실시간데이타요청 등록결과 %s" % (self.sName, ret))
except Exception as e:
print('CPriceMonitoring_Run Error :', e)
Telegram('[StockTrader]CPriceMonitoring_Run Error : %s' % e, send='mc')
logger.error('CPriceMonitoring_Run Error : %s' % e)
else:
Telegram("[StockTrader]%s ROBOT 실행 중지" % (self.sName))
ret = self.KiwoomSetRealRemove(self.sScreenNo, 'ALL')
self.KiwoomDisConnect() # 로봇 클래스 내에서 일별종목별실현손익 데이터를 받고나서 연결 해제시킴
# 메인 화면에 반영
self.parent.RobotView()
##################################################################################
# 메인
##################################################################################
Ui_MainWindow, QtBaseClass_MainWindow = uic.loadUiType("./UI/XTrader_MainWindow.ui")
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
# 화면을 보여주기 위한 코드
super().__init__()
QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.UI_setting()
# 현재 시간 받음
self.시작시각 = datetime.datetime.now()
# 메인윈도우가 뜨고 키움증권과 붙이기 위한 작업
self.KiwoomAPI() # 키움 ActiveX를 메모리에 올림
self.KiwoomConnect() # 메모리에 올라온 ActiveX와 내가 만든 함수 On시리즈와 연결(콜백 : 이벤트가 오면 나를 불러줘)
self.ScreenNumber = 5000
self.robots = []
self.dialog = dict()
# self.dialog['리얼데이타'] = None
# self.dialog['계좌정보조회'] = None
self.model = PandasModel()
self.tableView_robot.setModel(self.model)
self.tableView_robot.setSelectionBehavior(QTableView.SelectRows)
self.tableView_robot.setSelectionMode(QTableView.SingleSelection)
self.tableView_robot.pressed.connect(self.RobotCurrentIndex)
# self.connect(self.tableView_robot.selectionModel(), SIGNAL("currentRowChanged(QModelIndex,QModelIndex)"), self.RobotCurrentIndex)
self.tableView_robot_current_index = None
self.portfolio_model = PandasModel()
self.tableView_portfolio.setModel(self.portfolio_model)
self.tableView_portfolio.setSelectionBehavior(QTableView.SelectRows)
self.tableView_portfolio.setSelectionMode(QTableView.SingleSelection)
# self.portfolio_model.update((DataFrame(columns=['종목코드', '종목명', '매수가', '수량', '매수일'])))
self.robot_columns = ['Robot타입', 'Robot명', 'RobotID', '스크린번호', '실행상태', '포트수', '포트폴리오']
# TODO: 주문제한 설정
self.timer = QTimer(self)
self.timer.timeout.connect(self.limit_per_second) # 초당 4번
# QtCore.QObject.connect(self.timer, QtCore.SIGNAL("timeout()"), self.limit_per_second)
self.timer.start(1000) # 1초마다 리셋
self.ConditionTick = QTimer(self)
self.ConditionTick.timeout.connect(self.OnConditionCheck)
self.주문제한 = 0
self.조회제한 = 0
self.금일백업작업중 = False
self.종목선정작업중 = False
self.ConditionCheck = False
self.조건식저장카운트 = 1
self.DailyData = False # 관심종목 일봉 업데이트
self.InvestorData = False # 관심종목 종목별투자자 업데이트
self.df_daily = DataFrame()
self.df_weekly = DataFrame()
self.df_monthly = DataFrame()
self.df_investor = DataFrame()
self._login = False
self.KiwoomLogin() # 프로그램 실행 시 자동로그인
self.CODE_POOL = self.get_code_pool() # DB 종목데이블에서 시장구분, 코드, 종목명, 주식수, 전일종가 읽어옴
# 화면 Setting
def UI_setting(self):
self.setupUi(self)
self.setWindowTitle("XTrader")
self.setWindowIcon(QIcon('./PNG/icon_stock.png'))
self.actionLogin.setIcon(QIcon('./PNG/Internal.png'))
self.actionLogout.setIcon(QIcon('./PNG/External.png'))
self.actionExit.setIcon(QIcon('./PNG/Approval.png'))
self.actionAccountDialog.setIcon(QIcon('./PNG/Sales Performance.png'))
self.actionMinutePrice.setIcon(QIcon('./PNG/Candle Sticks.png'))
self.actionDailyPrice.setIcon(QIcon('./PNG/Overtime.png'))
self.actionInvestors.setIcon(QIcon('./PNG/Conference Call.png'))
self.actionSectorView.setIcon(QIcon('./PNG/Organization.png'))
self.actionSectorPriceView.setIcon(QIcon('./PNG/Ratings.png'))
self.actionCodeBuild.setIcon(QIcon('./PNG/Inspection.png'))
self.actionRobotOneRun.setIcon(QIcon('./PNG/Process.png'))
self.actionRobotOneStop.setIcon(QIcon('./PNG/Cancel 2.png'))
self.actionRobotMonitoringStop.setIcon(QIcon('./PNG/Cancel File.png'))
self.actionRobotRun.setIcon(QIcon('./PNG/Checked.png'))
self.actionRobotStop.setIcon(QIcon('./PNG/Cancel.png'))
self.actionRobotRemove.setIcon(QIcon('./PNG/Delete File.png'))
self.actionRobotClear.setIcon(QIcon('./PNG/Empty Trash.png'))
self.actionRobotView.setIcon(QIcon('./PNG/Checked 2.png'))
self.actionRobotSave.setIcon(QIcon('./PNG/Download.png'))
self.actionTradeShortTerm.setIcon(QIcon('./PNG/Bullish.png'))
self.actionTradeCondition.setIcon(QIcon('./PNG/Search.png'))
self.actionConditionMonitoring.setIcon(QIcon('./PNG/Binoculars.png'))
# 종목 선정
def stock_analysis(self):
try:
self.AnalysisPriceList = self.AnalysisPriceList
except:
for robot in self.robots:
if robot.sName == 'TradeShortTerm':
self.AnalysisPriceList = robot.Stocklist['전략']['시세조회단위']
self.종목선정데이터 = pd.DataFrame(shortterm_analysis_sheet.get_all_records()) # shortterm_analysis_sheet
self.종목선정데이터 = self.종목선정데이터[['번호', '종목명']]
row = []
# print(self.종목선정데이터)
for name in self.종목선정데이터['종목명'].values:
try:
code, name, market = get_code(name)
except Exception as e:
code = ''
print('get_code Error :', name, e)
row.append(code)
self.종목선정데이터['종목코드'] = row
self.종목선정데이터 = self.종목선정데이터[self.종목선정데이터['종목코드'] != '']
print(self.종목선정데이터)
self.종목리스트 = list(self.종목선정데이터[['번호', '종목명', '종목코드']].values)
self.종목코드 = self.종목리스트.pop(0)
if self.DailyData == True:
self.start = datetime.datetime.now()
print(self.start)
self.ReguestPriceDaily()
elif self.InvestorData == True:
self.RequestInvestorDaily()
elif self.WeeklyData == True:
self.ReguestPriceWeekly()
elif self.MonthlyData == True:
self.ReguestPriceMonthly()
# 일봉데이터조희
def ReguestPriceDaily(self, _repeat=0):
try:
기준일자 = datetime.date.today().strftime('%Y%m%d')
self.종목일봉 = []
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", self.종목코드[2])
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "기준일자", 기준일자)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "수정주가구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "주식일봉차트조회", "OPT10081",
_repeat,
'{:04d}'.format(self.ScreenNumber))
self.statusbar.showMessage("관심종목 일봉 데이터 : %s %s %s" % (self.종목코드[0], self.종목코드[1], self.종목코드[2]))
except Exception as e:
print(e)
# 주봉데이터조회
def ReguestPriceWeekly(self, _repeat=0):
try:
기준일자 = datetime.date.today().strftime('%Y%m%d')
self.종목주봉 = []
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", self.종목코드[2])
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "기준일자", 기준일자)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "수정주가구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "주식주봉차트조회", "OPT10082",
_repeat,
'{:04d}'.format(self.ScreenNumber))
self.statusbar.showMessage("관심종목 주봉 데이터 : %s %s %s" % (self.종목코드[0], self.종목코드[1], self.종목코드[2]))
except Exception as e:
print(e)
# 월봉데이터조회
def ReguestPriceMonthly(self, _repeat=0):
try:
기준일자 = datetime.date.today().strftime('%Y%m%d')
self.종목월봉 = []
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", self.종목코드[2])
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "기준일자", 기준일자)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "수정주가구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "주식월봉차트조회", "OPT10083",
_repeat,
'{:04d}'.format(self.ScreenNumber))
self.statusbar.showMessage("관심종목 월봉 데이터 : %s %s %s" % (self.종목코드[0], self.종목코드[1], self.종목코드[2]))
except Exception as e:
print(e)
# 종목별투자자조희
def RequestInvestorDaily(self, _repeat=0):
기준일자 = datetime.date.today().strftime('%Y%m%d')
self.종목별투자자 = []
try:
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "일자", 기준일자)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", self.종목코드[2])
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, int)', "금액수량구분", 2) # 1:금액, 2:수량
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, int)', "매매구분", 0) # 0:순매수, 1:매수, 2:매도
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, int)', "단위구분", 1) # 1000:천주, 1:단주
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "종목별투자자조회", "OPT10060",
_repeat,
'{:04d}'.format(self.ScreenNumber))
self.statusbar.showMessage("관심종목 종목별투자자 데이터 : %s %s %s" % (self.종목코드[0], self.종목코드[1], self.종목코드[2]))
except Exception as e:
print(e)
# DB 데이터 저장
def UploadAnalysisData(self, data, 구분):
# shortterm_analysis_sheet = test_analysis_sheet
row = []
if 구분 == '일봉':
try:
data['일봉1'] = data['현재가'].rolling(window=self.AnalysisPriceList[0]).mean()
data['일봉2'] = data['현재가'].rolling(window=self.AnalysisPriceList[1]).mean()
data['일봉3'] = data['현재가'].rolling(window=self.AnalysisPriceList[2]).mean()
data['일봉4'] = data['현재가'].rolling(window=self.AnalysisPriceList[3]).mean()
result = data.iloc[-1].values
# 구글 업로드
# row.append(self.종목코드[0])
# row.append(str(round((result[3] / int(result[1]) - 1) * 100, 2)) + '%')
# row.append(str(round((result[4] / int(result[1]) - 1) * 100, 2)) + '%')
# row.append(str(round((result[5] / int(result[1]) - 1) * 100, 2)) + '%')
# row.append(str(round((result[6] / int(result[1]) - 1) * 100, 2)) + '%')
# row.append(str(round((int(data.iloc[-2]['거래량']) / int(data.iloc[-1]['거래량']) - 1) * 100, 2)) + '%')
# print(row)
#
# code_row = shortterm_analysis_sheet.findall(row[0])[-1].row
#
# cell = alpha_list[shortterm_analysis_cols.index('일봉1')] + str(code_row)
# shortterm_analysis_sheet.update_acell(cell, row[1])
# cell = alpha_list[shortterm_analysis_cols.index('일봉2')] + str(code_row)
# shortterm_analysis_sheet.update_acell(cell, row[2])
# cell = alpha_list[shortterm_analysis_cols.index('일봉3')] + str(code_row)
# shortterm_analysis_sheet.update_acell(cell, row[3])
# cell = alpha_list[shortterm_analysis_cols.index('일봉4')] + str(code_row)
# shortterm_analysis_sheet.update_acell(cell, row[4])
# cell = alpha_list[shortterm_analysis_cols.index('거래량')] + str(code_row)
# shortterm_analysis_sheet.update_acell(cell, row[5])
# DB 저장
dict = {'번호': [],
'종목명': [],
'종목코드': [],
'일봉1': [],
'일봉2': [],
'일봉3': [],
'일봉4': [],
'거래량': []}
dict['번호'].append(str(self.종목코드[0]))
dict['종목명'].append(self.종목코드[1])
dict['종목코드'].append(self.종목코드[2])
dict['일봉1'].append(str(round((result[3] / int(result[1]) - 1) * 100, 2)) + '%')
dict['일봉2'].append(str(round((result[4] / int(result[1]) - 1) * 100, 2)) + '%')
dict['일봉3'].append(str(round((result[5] / int(result[1]) - 1) * 100, 2)) + '%')
dict['일봉4'].append(str(round((result[6] / int(result[1]) - 1) * 100, 2)) + '%')
dict['거래량'].append(
str(round((int(data.iloc[-2]['거래량']) / int(data.iloc[-1]['거래량']) - 1) * 100, 2)) + '%')
temp = DataFrame(dict)
self.df_daily = pd.concat([self.df_daily, temp])
except Exception as e:
print('UploadDailyPriceData Error : ', e)
elif 구분 == '주봉':
try:
data['주봉1'] = data['현재가'].rolling(window=self.AnalysisPriceList[4]).mean()
result = data.iloc[-1].values
# 구글 업로드
# row.append(self.종목코드[0])
# row.append(str(round((result[2] / int(result[1]) - 1) * 100, 2)) + '%')
# print(row)
#
# code_row = shortterm_analysis_sheet.findall(row[0])[-1].row
#
# cell = alpha_list[shortterm_analysis_cols.index('주봉1')] + str(code_row)
# shortterm_analysis_sheet.update_acell(cell, row[1])
# DB 저장
dict = {'종목코드': [],
'주봉1': []
}
dict['종목코드'].append(self.종목코드[2])
dict['주봉1'].append(str(round((result[2] / int(result[1]) - 1) * 100, 2)) + '%')
temp = DataFrame(dict)
self.df_weekly = pd.concat([self.df_weekly, temp])
except Exception as e:
print('UploadWeeklyPriceData Error : ', e)
elif 구분 == '월봉':
try:
data['월봉1'] = data['현재가'].rolling(window=self.AnalysisPriceList[5]).mean()
result = data.iloc[-1].values
# 구글 업로드
# row.append(self.종목코드[0])
# row.append(str(round((result[2] / int(result[1]) - 1) * 100, 2)) + '%')
# print(row)
#
# code_row = shortterm_analysis_sheet.findall(row[0])[-1].row
#
# cell = alpha_list[shortterm_analysis_cols.index('월봉1')] + str(code_row)
# shortterm_analysis_sheet.update_acell(cell, row[1])
# DB 저장
dict = {'종목코드': [],
'월봉1': []
}
dict['종목코드'].append(self.종목코드[2])
dict['월봉1'].append(str(round((result[2] / int(result[1]) - 1) * 100, 2)) + '%')
temp = DataFrame(dict)
self.df_monthly = pd.concat([self.df_monthly, temp])
except Exception as e:
print('UploadmonthlyPriceData Error : ', e)
elif 구분 == '종목별투자자':
try:
result = data.iloc[-1].values
# 구글 업로드
# row.append(self.종목코드[0])
# row.append(result[1]) # 기관
# row.append(result[2]) # 외국인
# row.append(result[3]) # 개인
# print(row)
#
# code_row = shortterm_analysis_sheet.findall(row[0])[-1].row
#
# cell = alpha_list[shortterm_analysis_cols.index('기관수급')] + str(code_row)
# shortterm_analysis_sheet.update_acell(cell, row[1])
# cell = alpha_list[shortterm_analysis_cols.index('외인수급')] + str(code_row)
# shortterm_analysis_sheet.update_acell(cell, row[2])
# cell = alpha_list[shortterm_analysis_cols.index('개인')] + str(code_row)
# shortterm_analysis_sheet.update_acell(cell, row[3])
# DB 저장
dict = {'종목코드': [],
'기관': [],
'외인': [],
'개인': []}
dict['종목코드'].append(self.종목코드[2])
dict['기관'].append(result[1]) # 기관
dict['외인'].append(result[2]) # 외국인
dict['개인'].append(result[3]) # 개인
temp = DataFrame(dict)
self.df_investor = pd.concat([self.df_investor, temp])
except Exception as e:
print('UploadDailyInvestorData Error : ', e)
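# 참고용 최소 스케치 : 일봉N/주봉1/월봉1 컬럼이 "이동평균 대비 현재가 괴리율(%)" 문자열로 만들어지는 계산을
# 가상의 수치로 확인하는 예시(현재가 10,000원, 5일 이동평균 10,500원은 가정값)
# 현재가, ma5 = 10000, 10500
# 괴리율 = str(round((ma5 / 현재가 - 1) * 100, 2)) + '%'
# print(괴리율) # '5.0%' 예상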
# DB에 저장된 상장 종목 코드 읽음
def get_code_pool(self):
query = """
select 시장구분, 종목코드, 종목명, 주식수, 전일종가, 전일종가*주식수 as 시가총액
from 종목코드
order by 시장구분, 종목코드
"""
conn = sqliteconn()
df = pd.read_sql(query, con=conn)
conn.close()
pool = dict()
for idx, row in df.iterrows():
시장구분, 종목코드, 종목명, 주식수, 전일종가, 시가총액 = row
pool[종목코드] = [시장구분, 종목명, 주식수, 전일종가, 시가총액]
return pool
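# 참고용 최소 스케치 : get_code_pool()이 만드는 CODE_POOL 딕셔너리의 조회 방식을 보여주는 예시
# (아래 종목코드/수치는 모두 가정값)
# pool = {'005930': ['KOSPI', '삼성전자', 5969782550, 70000, 417884778500000]}
# 시장구분, 종목명, 주식수, 전일종가, 시가총액 = pool['005930']
# print(종목명, 전일종가) # 삼성전자 70000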
# 구글스프레드시트 종목 Import
def Import_ShortTermStock(self, check):
try:
data = import_googlesheet()
if check == False:
# # 매수 전략별 별도 로봇 운영 시
# # 매수 전략 확인
# strategy_list = list(data['매수전략'].unique())
#
# # 로딩된 로봇을 robot_list에 저장
# robot_list = []
# for robot in self.robots:
# robot_list.append(robot.sName.split('_')[0])
#
# # 매수 전략별 로봇 자동 편집/추가
# for strategy in strategy_list:
# df_stock = data[data['매수전략'] == strategy]
#
# if strategy in robot_list:
# print('로봇 편집')
# Telegram('[StockTrader]로봇 편집')
# for robot in self.robots:
# if robot.sName.split('_')[0] == strategy:
# self.RobotAutoEdit_TradeShortTerm(robot, df_stock)
# self.RobotView()
# break
# else:
# print('로봇 추가')
# Telegram('[StockTrader]로봇 추가')
# self.RobotAutoAdd_TradeShortTerm(df_stock, strategy)
# self.RobotView()
# 로딩된 로봇을 robot_list에 저장
robot_list = []
for robot in self.robots:
robot_list.append(robot.sName)
if 'TradeShortTerm' in robot_list:
for robot in self.robots:
if robot.sName == 'TradeShortTerm':
print('로봇 편집')
logger.debug('로봇 편집')
self.RobotAutoEdit_TradeShortTerm(robot, data)
self.RobotView()
break
else:
print('로봇 추가')
logger.debug('로봇 추가')
self.RobotAutoAdd_TradeShortTerm(data)
self.RobotView()
# print("로봇 준비 완료")
# Slack('[XTrader]로봇 준비 완료')
# logger.info("로봇 준비 완료")
except Exception as e:
print('MainWindow_Import_ShortTermStock Error', e)
Telegram('[StockTrader]MainWindow_Import_ShortTermStock Error : %s' % e, send='mc')
logger.error('MainWindow_Import_ShortTermStock Error : %s' % e)
# 금일 매도 종목에 대해서 수익률, 수익금, 수수료 요청(일별종목별실현손익요청)
# def DailyProfit(self, 금일매도종목):
# _repeat = 0
# # self.sAccount = 로봇거래계좌번호
# # self.sScreenNo = self.ScreenNumber
# 시작일자 = datetime.date.today().strftime('%Y%m%d')
# cnt=1
# for 종목코드 in 금일매도종목:
# self.update_cnt = len(금일매도종목) - cnt
# cnt += 1
# ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.sAccount)
# ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", 종목코드)
# ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "시작일자", 시작일자)
# ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "일자별종목별실현손익요청", "OPT10072", _repeat, '{:04d}'.format(self.ScreenNumber))
#
# self.DailyProfitLoop = QEventLoop() # 로봇에서 바로 쓸 수 있도록하기 위해서 계좌 조회해서 종목을 받고나서 루프해제시킴
# self.DailyProfitLoop.exec_()
# 일별종목별실현손익 응답 결과 구글 업로드
# def DailyProfitUpload(self, 매도결과):
# # 매도결과 ['종목명','체결량','매입단가','체결가','당일매도손익','손익율','당일매매수수료','당일매매세금']
# print(매도결과)
#
# for r in self.robots:
# if r.sName == 'TradeShortTerm':
# history_sheet = history_sheet
# history_cols = history_cols
# elif r.sName == 'TradeCondition':
# history_sheet = condition_history_sheet
# history_cols = condition_history_cols
#
# code_row = history_sheet.findall(매도결과[0])[-1].row
#
# 계산수익률 = round((int(float(매도결과[3])) / int(float(매도결과[2])) - 1) * 100, 2)
#
# cell = alpha_list[history_cols.index('매수가')] + str(code_row) # 매입단가
# history_sheet.update_acell(cell, int(float(매도결과[2])))
#
# cell = alpha_list[history_cols.index('매도가')] + str(code_row) # 체결가
# history_sheet.update_acell(cell, int(float(매도결과[3])))
#
# cell = alpha_list[history_cols.index('수익률(계산)')] + str(code_row) # 수익률 계산
# history_sheet.update_acell(cell, 계산수익률)
#
# cell = alpha_list[history_cols.index('수익률')] + str(code_row) # 손익율
# history_sheet.update_acell(cell, 매도결과[5])
#
# cell = alpha_list[history_cols.index('수익금')] + str(code_row) # 손익율
# history_sheet.update_acell(cell, int(float(매도결과[4])))
#
# cell = alpha_list[history_cols.index('세금+수수료')] + str(code_row) # 당일매매수수료 + 당일매매세금
# history_sheet.update_acell(cell, int(float(매도결과[6])) + int(float(매도결과[7])))
#
# self.DailyProfitLoop.exit()
#
# if self.update_cnt == 0:
# print('금일 실현 손익 구글 업로드 완료')
# Slack("[XTrader]금일 실현 손익 구글 업로드 완료")
# logger.info("[XTrader]금일 실현 손익 구글 업로드 완료")
# 조건 검색식 읽어서 해당 종목 저장
def GetCondition(self):
# logger.info("조건 검색식 종목 읽기")
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer)
self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition)
conditions = ['매물대거래량','외국인기관수급', '주도주', '당일주도주', '기본주도주','스토캐스틱&MACD&거래량회전율', '갭상승']
try:
self.getConditionLoad()
self.conditionid = []
self.conditionname = []
for index in self.condition.keys(): # condition은 dictionary
# print(self.condition)
if self.condition[index] in conditions:
self.conditionid.append(str(index))
self.conditionname.append(self.condition[index])
print('조건 검색 시작')
print(index, self.condition[index])
self.sendCondition("0156", self.condition[index], index, 0)
except Exception as e:
print("GetCondition_Error")
print(e)
finally:
# print(self.df_condition)
query = """
select * from 조건검색식
"""
conn = sqliteconn()
df = pd.read_sql(query, con=conn)
conn.close()
df = df.drop_duplicates(['카운트', '종목명'], keep='first')
df = df.sort_values(by=['카운트','인덱스']).reset_index(drop=True)
savetime = today.strftime('%Y%m%d') + '_'+ current_time.replace(':','')
df.to_csv(savetime +"_조건검색종목.csv", encoding='euc-kr', index=False)
self.조건식저장카운트 += 1
self.ConditionCheck = False
logger.info("조건 검색식 종목 저장완료")
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer)
self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition)
# 조건식 목록 요청 메서드
def getConditionLoad(self):
self.kiwoom.dynamicCall("GetConditionLoad()")
# receiveConditionVer() 이벤트 메서드에서 루프 종료
self.conditionLoop = QEventLoop()
self.conditionLoop.exec_()
# 조건식 획득 메서드
def getConditionNameList(self):
# 조건식을 딕셔너리 형태로 반환합니다.
# 이 메서드는 반드시 receiveConditionVer() 이벤트 메서드안에서 사용해야 합니다.
#
# :return: dict - {인덱스:조건명, 인덱스:조건명, ...}
data = self.kiwoom.dynamicCall("GetConditionNameList()")
conditionList = data.split(';')
del conditionList[-1]
conditionDictionary = {}
for condition in conditionList:
key, value = condition.split('^')
conditionDictionary[int(key)] = value
return conditionDictionary
# 종목 조건검색 요청 메서드
def sendCondition(self, screenNo, conditionName, conditionIndex, isRealTime):
# 이 메서드로 얻고자 하는 것은 해당 조건에 맞는 종목코드이다.
# 해당 종목에 대한 상세정보는 setRealReg() 메서드로 요청할 수 있다.
# 요청이 실패하는 경우는, 해당 조건식이 없거나, 조건명과 인덱스가 맞지 않거나, 조회 횟수를 초과하는 경우 발생한다.
#
# 조건검색에 대한 결과는
# 1회성 조회의 경우, receiveTrCondition() 이벤트로 결과값이 전달되며
# 실시간 조회의 경우, receiveTrCondition()과 receiveRealCondition() 이벤트로 결과값이 전달된다.
#
# :param screenNo: string
# :param conditionName: string - 조건식 이름
# :param conditionIndex: int - 조건식 인덱스
# :param isRealTime: int - 조건검색 조회구분(0: 1회성 조회, 1: 실시간 조회)
isRequest = self.kiwoom.dynamicCall("SendCondition(QString, QString, int, int)",
screenNo, conditionName, conditionIndex, isRealTime)
# receiveTrCondition() 이벤트 메서드에서 루프 종료
self.conditionLoop = QEventLoop()
self.conditionLoop.exec_()
# 프로그램 실행 3초 후 실행
def OnQApplicationStarted(self):
# 1. 8시 58분 이전일 경우 5분 단위 구글시트 오퓨 체크 타이머 시작시킴
current = datetime.datetime.now()
current_time = current.strftime('%H:%M:%S')
"""
if '07:00:00' <= current_time and current_time <= '08:58:00':
print('구글 시트 오류 체크 시작')
# Telegram('[StockTrader]구글 시트 오류 체크 시작')
self.statusbar.showMessage("구글 시트 오류 체크 시작")
self.checkclock = QTimer(self)
self.checkclock.timeout.connect(self.OnGoogleCheck) # 5분마다 구글 시트 읽음 : MainWindow.OnGoogleCheck 실행
self.checkclock.start(300000) # 300000초마다 타이머 작동
"""
# 2. DB에 저장된 로봇 정보받아옴
global 로봇거래계좌번호
try:
with sqlite3.connect(DATABASE) as conn:
cursor = conn.cursor()
cursor.execute("select value from Setting where keyword='robotaccount'")
for row in cursor.fetchall():
# _temp = base64.decodestring(row[0]) # base64에 text화해서 암호화 : DB에 잘 넣기 위함
_temp = base64.decodebytes(row[0])
로봇거래계좌번호 = pickle.loads(_temp)
print('로봇거래계좌번호', 로봇거래계좌번호)
cursor.execute('select uuid, strategy, name, robot from Robots')
self.robots = []
for row in cursor.fetchall():
uuid, strategy, name, robot_encoded = row
robot = base64.decodebytes(robot_encoded)
# r = base64.decodebytes(robot_encoded)
r = pickle.loads(robot)
r.kiwoom = self.kiwoom
r.parent = self
r.d = today
r.running = False
# logger.debug(r.sName, r.UUID, len(r.portfolio))
self.robots.append(r)
except Exception as e:
print('OnQApplicationStarted', e)
self.RobotView()
# 프로그램 실행 후 1초 마다 실행 : 조건에 맞는 시간이 되면 백업 시작
def OnClockTick(self):
current = datetime.datetime.now()
global current_time
current_time = current.strftime('%H:%M:%S')
# 8시 32분 : 종목 데이블 생성
if current_time == '08:32:00':
print('종목테이블 생성')
# Slack('[XTrader]종목테이블 생성')
self.StockCodeBuild(to_db=True)
self.CODE_POOL = self.get_code_pool() # DB 종목데이블에서 시장구분, 코드, 종목명, 주식수, 전일종가 읽어옴
self.statusbar.showMessage("종목테이블 생성")
"""
# 8시 59분 : 구글 시트 종목 Import
if current_time == '08:59:00':
print('구글 시트 오류 체크 중지')
# Telegram('[StockTrader]구글 시트 오류 체크 중지')
self.checkclock.stop()
robot_list = []
for robot in self.robots:
robot_list.append(robot.sName)
if 'TradeShortTerm' in robot_list:
print('구글시트 Import')
Telegram('[StockTrader]구글시트 Import')
self.Import_ShortTermStock(check=False)
self.statusbar.showMessage('구글시트 Import')
"""
# 8시 59분 30초 : 로봇 실행
if '09:00:00' <= current_time and current_time < '09:00:05':
try:
if len(self.robots) > 0:
for r in self.robots:
if r.running == False: # 로봇이 실행중이 아니면
r.Run(flag=True, sAccount=로봇거래계좌번호)
self.RobotView()
except Exception as e:
print('Robot Auto Run Error', e)
Telegram('[StockTrader]Robot Auto Run Error : %s' % e, send='mc')
logger.error('Robot Auto Run Error : %s' % e)
# TradeShortTerm 보유일 만기 매도 전략 체크용
# if current_time >= '15:29:00' and current_time < '15:29:30':
# if len(self.robots) > 0:
# for r in self.robots:
# if r.sName == 'TradeShortTerm':
# if r.holdcheck == False:
# r.holdcheck = True
# r.hold_strategy()
# 15시 17분 :TradeCondition 당일청산 매도 실행
if current_time >= '15:17:00' and current_time < '15:17:30':
if len(self.robots) > 0:
for r in self.robots:
if r.sName == 'TradeCondition' and '당일청산' in r.조건식명:
if r.clearcheck == False:
r.clearcheck = True
r.clearning_strategy()
# 16시 00분 : 로봇 정지
if '15:40:00' <= current_time and current_time < '15:40:05':
self.RobotStop()
# 16시 05분 : 프로그램 종료
if '15:45:00' <= current_time and current_time < '15:45:05':
quit()
# 18시 00분 : 종목 분석을 위한 일봉, 종목별투자자정보 업데이트
# if '18:00:00' <= current_time and current_time < '18:00:05':
# if self.DailyData == False:
# self.DailyData = True
# self.WeeklyData = False
# self.MonthlyData = False
# self.InvestorData = False
# Telegram("[XTrader]관심종목 데이터 업데이트", send='mc')
# self.stock_analysis()
# if '153600' < current_time and current_time < '153659' and self.금일백업작업중 == False and self._login == True:# and current.weekday() == 4:
# 수능일이면 아래 시간 조건으로 수정
# if '17:00:00' < current.strftime('%H:%M:%S') and current.strftime('%H:%M:%S') < '17:00:59' and self.금일백업작업중 == False and self._login == True:
# self.금일백업작업중 = True
# self.Backup(작업=None)
# pass
# 로봇을 저장
# if self.시작시각.strftime('%H:%M:%S') > '08:00:00' and self.시작시각.strftime('%H:%M:%S') < '15:30:00' and current.strftime('%H:%M:%S') > '01:00:00':
# if len(self.robots) > 0:
# self.RobotSave()
# for k in self.dialog:
# self.dialog[k].KiwoomDisConnect()
# try:
# self.dialog[k].close()
# except Exception as e:
# pass
# self.close()
# 지정 시간에 로봇을 중지한다던가 원하는 실행을 아래 pass에 작성
# if current_time > '08:58:00' and current_time <= '15:30:00':
# if current.second == 0 and current.minute % 3 == 0 and self.ConditionCheck == False:
# self.ConditionCheck = True
# self.GetCondition()
# if current.weekday() in workday_list: # 주중인지 확인
# if current_time in savetime_list: # 지정된 시간인지 확인
# logger.info("조건검색식 타이머 작동")
# Telegram(str(current)[:-7] + " : " + "조건검색식 종목 검색")
# self.GetCondition() # 조건검색식을 모두 읽어서 해당하는 종목 저장
# if current.second == 0: # 매 0초
# # if current.minute % 10 == 0: # 매 10 분
# if current.minute == 1 or current.strftime('%H:%M:%S') == '09:30:00' or current.strftime('%H:%M:%S') == '15:15:00': # 매시 1분
# logger.info("조건검색식 타이머 작동")
# Telegram(str(current)[:-7] + " : " + "조건검색식 종목 검색")
# # print(current.minute, current.second)
# self.GetCondition() # 조건검색식을 모두 읽어서 해당하는 종목 저장
# for r in self.robots:
# if r.running == True: # 로봇이 실행중이면
# # print(r.sName, r.running)
# pass
# 주문 제한 초기화
def limit_per_second(self):
self.주문제한 = 0
self.조회제한 = 0
# logger.info("초당제한 주문 클리어")
def OnConditionCheck(self):
try:
current = datetime.datetime.now()
if current.second == 0 and current.minute % 3 == 0:
for robot in self.robots:
if 'TradeCondition' in robot.sName:
if robot.조건검색타입 == 0:
robot.ConditionCheck()
except Exception as e:
print(e)
# 5분 마다 실행 : 구글 스프레드 시트 오류 확인
def OnGoogleCheck(self):
self.Import_ShortTermStock(check=True)
# 메인 윈도우에서의 모든 액션에 대한 처리
def MENU_Action(self, qaction):
logger.debug("Action Slot %s %s " % (qaction.objectName(), qaction.text()))
try:
_action = qaction.objectName()
if _action == "actionExit":
if len(self.robots) > 0:
self.RobotSave()
for k in self.dialog:
self.dialog[k].KiwoomDisConnect()
try:
self.dialog[k].close()
except Exception as e:
pass
self.close()
elif _action == "actionLogin":
self.KiwoomLogin()
elif _action == "actionLogout":
self.KiwoomLogout()
elif _action == "actionDailyPrice":
# self.F_dailyprice()
if self.dialog.get('일자별주가') is not None:
try:
self.dialog['일자별주가'].show()
except Exception as e:
self.dialog['일자별주가'] = 화면_일별주가(sScreenNo=9902, kiwoom=self.kiwoom, parent=self)
self.dialog['일자별주가'].KiwoomConnect()
self.dialog['일자별주가'].show()
else:
self.dialog['일자별주가'] = 화면_일별주가(sScreenNo=9902, kiwoom=self.kiwoom, parent=self)
self.dialog['일자별주가'].KiwoomConnect()
self.dialog['일자별주가'].show()
elif _action == "actionMinutePrice":
# self.F_minprice()
if self.dialog.get('분별주가') is not None:
try:
self.dialog['분별주가'].show()
except Exception as e:
self.dialog['분별주가'] = 화면_분별주가(sScreenNo=9903, kiwoom=self.kiwoom, parent=self)
self.dialog['분별주가'].KiwoomConnect()
self.dialog['분별주가'].show()
else:
self.dialog['분별주가'] = 화면_분별주가(sScreenNo=9903, kiwoom=self.kiwoom, parent=self)
self.dialog['분별주가'].KiwoomConnect()
self.dialog['분별주가'].show()
elif _action == "actionInvestors":
# self.F_investor()
if self.dialog.get('종목별투자자') is not None:
try:
self.dialog['종목별투자자'].show()
except Exception as e:
self.dialog['종목별투자자'] = 화면_종목별투자자(sScreenNo=9904, kiwoom=self.kiwoom, parent=self)
self.dialog['종목별투자자'].KiwoomConnect()
self.dialog['종목별투자자'].show()
else:
self.dialog['종목별투자자'] = 화면_종목별투자자(sScreenNo=9904, kiwoom=self.kiwoom, parent=self)
self.dialog['종목별투자자'].KiwoomConnect()
self.dialog['종목별투자자'].show()
elif _action == "actionAccountDialog": # 계좌정보조회
if self.dialog.get('계좌정보조회') is not None: # dialog : __init__()에 dict로 정의됨
try:
self.dialog['계좌정보조회'].show()
except Exception as e:
self.dialog['계좌정보조회'] = 화면_계좌정보(sScreenNo=7000, kiwoom=self.kiwoom,
parent=self) # self는 메인윈도우, 계좌정보윈도우는 자식윈도우/부모는 메인윈도우
self.dialog['계좌정보조회'].KiwoomConnect()
self.dialog['계좌정보조회'].show()
else:
self.dialog['계좌정보조회'] = 화면_계좌정보(sScreenNo=7000, kiwoom=self.kiwoom, parent=self)
self.dialog['계좌정보조회'].KiwoomConnect()
self.dialog['계좌정보조회'].show()
elif _action == "actionSectorView":
# self.F_sectorview()
if self.dialog.get('업종정보조회') is not None:
try:
self.dialog['업종정보조회'].show()
except Exception as e:
self.dialog['업종정보조회'] = 화면_업종정보(sScreenNo=9900, kiwoom=self.kiwoom, parent=self)
self.dialog['업종정보조회'].KiwoomConnect()
self.dialog['업종정보조회'].show()
else:
self.dialog['업종정보조회'] = 화면_업종정보(sScreenNo=9900, kiwoom=self.kiwoom, parent=self)
self.dialog['업종정보조회'].KiwoomConnect()
self.dialog['업종정보조회'].show()
elif _action == "actionSectorPriceView":
# self.F_sectorpriceview()
if self.dialog.get('업종별주가조회') is not None:
try:
self.dialog['업종별주가조회'].show()
except Exception as e:
self.dialog['업종별주가조회'] = 화면_업종별주가(sScreenNo=9900, kiwoom=self.kiwoom, parent=self)
self.dialog['업종별주가조회'].KiwoomConnect()
self.dialog['업종별주가조회'].show()
else:
self.dialog['업종별주가조회'] = 화면_업종별주가(sScreenNo=9900, kiwoom=self.kiwoom, parent=self)
self.dialog['업종별주가조회'].KiwoomConnect()
self.dialog['업종별주가조회'].show()
elif _action == "actionTradeShortTerm":
self.RobotAdd_TradeShortTerm()
self.RobotView()
elif _action == "actionTradeCondition": # 키움 조건검색식을 이용한 트레이딩
# print("MainWindow : MENU_Action_actionTradeCondition")
self.RobotAdd_TradeCondition()
self.RobotView()
elif _action == "actionConditionMonitoring":
print("MainWindow : MENU_Action_actionConditionMonitoring")
self.ConditionMonitoring()
elif _action == "actionTradeLongTerm":
self.RobotAdd_TradeLongTerm()
self.RobotView()
elif _action == "actionPriceMonitoring":
self.RobotAdd_PriceMonitoring()
self.RobotView()
elif _action == "actionRobotLoad":
self.RobotLoad()
self.RobotView()
elif _action == "actionRobotSave":
self.RobotSave()
elif _action == "actionRobotOneRun":
self.RobotOneRun()
self.RobotView()
elif _action == "actionRobotOneStop":
self.RobotOneStop()
self.RobotView()
elif _action == "actionRobotMonitoringStop":
self.RobotOneMonitoringStop()
self.RobotView()
elif _action == "actionRobotRun":
self.RobotRun()
self.RobotView()
elif _action == "actionRobotStop":
self.RobotStop()
self.RobotView()
elif _action == "actionRobotRemove":
self.RobotRemove()
self.RobotView()
elif _action == "actionRobotClear":
self.RobotClear()
self.RobotView()
elif _action == "actionRobotView":
self.RobotView()
for r in self.robots:
logger.debug('%s %s %s %s' % (r.sName, r.UUID, len(r.portfolio), r.GetStatus()))
elif _action == "actionCodeBuild":
self.종목코드 = self.StockCodeBuild(to_db=True)
QMessageBox.about(self, "종목코드 생성", " %s 항목의 종목코드를 생성하였습니다." % (len(self.종목코드.index)))
elif _action == "actionTest":
# self.DailyData = True
# self.WeeklyData = False
# self.MonthlyData = False
# self.InvestorData = False
# self.stock_analysis()
# print(self.robots)
# for robot in self.robots:
# if robot.sName == 'TradeShortTerm':
# print(robot.Stocklist['전략']['시세조회단위'])
self.GetCondition()
except Exception as e:
print(e)
# 키움증권 OpenAPI
# 키움API ActiveX를 메모리에 올림
def KiwoomAPI(self):
self.kiwoom = QAxWidget("KHOPENAPI.KHOpenAPICtrl.1")
# 메모리에 올라온 ActiveX와 On시리즈와 붙임(콜백 : 이벤트가 오면 나를 불러줘)
def KiwoomConnect(self):
self.kiwoom.OnEventConnect[int].connect(
self.OnEventConnect) # 키움의 OnEventConnect와 이 프로그램의 OnEventConnect 함수와 연결시킴
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
# self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
self.kiwoom.OnReceiveChejanData[str, int, str].connect(self.OnReceiveChejanData)
# self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer)
# self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition)
self.kiwoom.OnReceiveRealData[str, str, str].connect(self.OnReceiveRealData)
# ActiveX와 On시리즈 연결 해제
def KiwoomDisConnect(self):
print('MainWindow KiwoomDisConnect')
self.kiwoom.OnEventConnect[int].disconnect(self.OnEventConnect)
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
# self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition)
# self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
self.kiwoom.OnReceiveChejanData[str, int, str].disconnect(self.OnReceiveChejanData)
# self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer)
# self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition)
self.kiwoom.OnReceiveRealData[str, str, str].disconnect(self.OnReceiveRealData)
# 키움 로그인
def KiwoomLogin(self):
self.kiwoom.dynamicCall("CommConnect()")
self._login = True
self.statusbar.showMessage("로그인...")
# 키움 로그아웃
def KiwoomLogout(self):
if self.kiwoom is not None:
self.kiwoom.dynamicCall("CommTerminate()")
self.statusbar.showMessage("연결해제됨...")
# 계좌 보유 종목 받음
def InquiryList(self, _repeat=0):
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.sAccount)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "비밀번호입력매체구분", '00')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "조회구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "계좌평가잔고내역요청", "opw00018",
_repeat, '{:04d}'.format(self.ScreenNumber))
self.InquiryLoop = QEventLoop() # 로봇에서 바로 쓸 수 있도록하기 위해서 계좌 조회해서 종목을 받고나서 루프해제시킴
self.InquiryLoop.exec_()
# 계좌 번호 / D+2 예수금 받음
def KiwoomAccount(self):
ACCOUNT_CNT = self.kiwoom.dynamicCall('GetLoginInfo("ACCOUNT_CNT")')
ACC_NO = self.kiwoom.dynamicCall('GetLoginInfo("ACCNO")')
self.account = ACC_NO.split(';')[0:-1]
self.sAccount = self.account[0]
global Account
Account = self.sAccount
global 로봇거래계좌번호
로봇거래계좌번호 = self.sAccount
print('계좌 : ', self.sAccount)
print('로봇계좌 : ', 로봇거래계좌번호)
self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.sAccount)
self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "d+2예수금요청", "opw00001", 0,
'{:04d}'.format(self.ScreenNumber))
self.depositLoop = QEventLoop() # self.d2_deposit를 로봇에서 바로 쓸 수 있도록하기 위해서 예수금을 받고나서 루프해제시킴
self.depositLoop.exec_()
# return (ACCOUNT_CNT, ACC_NO)
def KiwoomSendOrder(self, sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo):
if self.주문제한 < 초당횟수제한:
Order = self.kiwoom.dynamicCall(
'SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)',
[sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo])
self.주문제한 += 1
return (True, Order)
else:
return (False, 0)
    # - Price/quote type codes (2 digits), passed as sHogaGb
    #
    # 00 : limit order
    # 03 : market order
    # 05 : conditional limit
    # 06 : best-limit (최유리지정가)
    # 07 : top-priority limit (최우선지정가)
    # 10 : limit IOC
    # 13 : market IOC
    # 16 : best-limit IOC
    # 20 : limit FOK
    # 23 : market FOK
    # 26 : best-limit FOK
    # 61 : pre-market off-hours single-price trading
    # 81 : after-hours closing price
    # 62 : off-hours single-price trading
    #
    # - Order type codes (1 digit), passed as nOrderType
    # 1 : new buy
    # 2 : new sell
    # 3 : cancel buy
    # 4 : cancel sell
    # 5 : amend buy
    # 6 : amend sell
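    # Illustrative sketch of how the codes above plug into KiwoomSendOrder()
    # (comment only; the stock code, quantity and screen number are placeholder
    # values, not taken from the original strategy):
    #
    #   ok, order = self.KiwoomSendOrder(
    #       sRQName='신규매수',         # free-text request name
    #       sScreenNo='5000',           # screen number
    #       sAccNo=self.sAccount,       # account number
    #       nOrderType=1,               # 1 : new buy (1-digit code above)
    #       sCode='005930',             # stock code (placeholder)
    #       nQty=10,                    # order quantity
    #       nPrice=0,                   # 0 when ordering at market price
    #       sHogaGb='03',               # '03' : market order (2-digit code above)
    #       sOrgOrderNo='')             # original order no., only for amend/cancel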
def KiwoomSetRealReg(self, sScreenNo, sCode, sRealType='0'):
ret = self.kiwoom.dynamicCall('SetRealReg(QString, QString, QString, QString)', sScreenNo, sCode, '9001;10',
sRealType) # 10은 실시간FID로 메뉴얼에 나옴(현재가,체결가, 실시간종가)
return ret
# pass
def KiwoomSetRealRemove(self, sScreenNo, sCode):
ret = self.kiwoom.dynamicCall('SetRealRemove(QString, QString)', sScreenNo, sCode)
return ret
def KiwoomScreenNumber(self):
self.screen_number += 1
if self.screen_number > 8999:
self.screen_number = 5000
return self.screen_number
def OnEventConnect(self, nErrCode):
# logger.debug('main:OnEventConnect', nErrCode)
if nErrCode == 0:
# self.kiwoom.dynamicCall("KOA_Functions(QString, QString)", ["ShowAccountWindow", ""]) # 계좌 비밀번호 등록 창 실행(자동화를 위해서 AUTO 설정 후 등록 창 미실행
self.statusbar.showMessage("로그인 성공")
current = datetime.datetime.now().strftime('%H:%M:%S')
if current <= '08:58:00':
Telegram("[StockTrader]키움API 로그인 성공")
로그인상태 = True
# 로그인 성공하고 바로 계좌 및 보유 주식 목록 저장
self.KiwoomAccount()
self.InquiryList()
# self.GetCondition() # 조건검색식을 모두 읽어서 해당하는 종목 저장
else:
self.statusbar.showMessage("연결실패... %s" % nErrCode)
로그인상태 = False
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
# logger.debug('main:OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
pass
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('main:OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
# print("MainWindow : OnReceiveTrData")
if self.ScreenNumber != int(sScrNo):
return
if sRQName == "주식분봉차트조회":
self.주식분봉컬럼 = ['체결시간', '현재가', '시가', '고가', '저가', '거래량']
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.주식분봉컬럼:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and (S[0] == '-' or S[0] == '+'):
S = S[1:].lstrip('0')
row.append(S)
self.종목분봉.append(row)
if sPreNext == '2' and False:
QTimer.singleShot(주문지연, lambda: self.ReguestPriceMin(_repeat=2))
else:
df = DataFrame(data=self.종목분봉, columns=self.주식분봉컬럼)
df['체결시간'] = df['체결시간'].apply(
lambda x: x[0:4] + '-' + x[4:6] + '-' + x[6:8] + ' ' + x[8:10] + ':' + x[10:12] + ':' + x[12:])
df['종목코드'] = self.종목코드[0]
df['틱범위'] = self.틱범위
df = df[['종목코드', '틱범위', '체결시간', '현재가', '시가', '고가', '저가', '거래량']]
values = list(df.values)
                for col in ['현재가', '시가', '고가', '저가', '거래량']:
                    try:
                        df.loc[df[col] == '', [col]] = 0
                    except Exception as e:
                        pass
if sRQName == "주식일봉차트조회":
try:
self.주식일봉컬럼 = ['일자', '현재가', '거래량'] # ['일자', '현재가', '시가', '고가', '저가', '거래량', '거래대금']
# cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
cnt = self.AnalysisPriceList[3] + 30
for i in range(0, cnt):
row = []
for j in self.주식일봉컬럼:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
# if S == '': S = 0
# if j != '일자':S = int(float(S))
row.append(S)
# print(row)
self.종목일봉.append(row)
df = DataFrame(data=self.종목일봉, columns=self.주식일봉컬럼)
# df.to_csv('data.csv')
try:
df.loc[df.현재가 == '', ['현재가']] = 0
df.loc[df.거래량 == '', ['거래량']] = 0
except:
pass
df = df.sort_values(by='일자').reset_index(drop=True)
# df.to_csv('data.csv')
self.UploadAnalysisData(data=df, 구분='일봉')
if len(self.종목리스트) > 0:
self.종목코드 = self.종목리스트.pop(0)
QTimer.singleShot(주문지연, lambda: self.ReguestPriceDaily(_repeat=0))
else:
print('일봉데이터 수신 완료')
self.DailyData = False
self.WeeklyData = True
self.MonthlyData = False
self.InvestorData = False
self.stock_analysis()
except Exception as e:
print('OnReceiveTrData_주식일봉차트조회 : ', self.종목코드, e)
if sRQName == "주식주봉차트조회":
try:
self.주식주봉컬럼 = ['일자', '현재가'] # ['일자', '현재가', '시가', '고가', '저가', '거래량', '거래대금']
# cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
cnt = self.AnalysisPriceList[4]+5
for i in range(0, cnt):
row = []
for j in self.주식주봉컬럼:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
# if S == '': S = 0
# if j != '일자':S = int(float(S))
row.append(S)
# print(row)
self.종목주봉.append(row)
df = DataFrame(data=self.종목주봉, columns=self.주식주봉컬럼)
# df.to_csv('data.csv')
try:
df.loc[df.현재가 == '', ['현재가']] = 0
except:
pass
df = df.sort_values(by='일자').reset_index(drop=True)
# df.to_csv('data.csv')
self.UploadAnalysisData(data=df, 구분='주봉')
if len(self.종목리스트) > 0:
self.종목코드 = self.종목리스트.pop(0)
QTimer.singleShot(주문지연, lambda: self.ReguestPriceWeekly(_repeat=0))
else:
print('주봉데이터 수신 완료')
self.DailyData = False
self.WeeklyData = False
self.MonthlyData = True
self.InvestorData = False
self.stock_analysis()
except Exception as e:
print('OnReceiveTrData_주식주봉차트조회 : ', self.종목코드, e)
if sRQName == "주식월봉차트조회":
try:
self.주식월봉컬럼 = ['일자', '현재가'] # ['일자', '현재가', '시가', '고가', '저가', '거래량', '거래대금']
# cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
cnt = self.AnalysisPriceList[5]+5
for i in range(0, cnt):
row = []
for j in self.주식월봉컬럼:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
# if S == '': S = 0
# if j != '일자':S = int(float(S))
row.append(S)
# print(row)
self.종목월봉.append(row)
df = DataFrame(data=self.종목월봉, columns=self.주식월봉컬럼)
try:
df.loc[df.현재가 == '', ['현재가']] = 0
except:
pass
df = df.sort_values(by='일자').reset_index(drop=True)
#df.to_csv('data.csv')
self.UploadAnalysisData(data=df, 구분='월봉')
if len(self.종목리스트) > 0:
self.종목코드 = self.종목리스트.pop(0)
QTimer.singleShot(주문지연, lambda: self.ReguestPriceMonthly(_repeat=0))
else:
print('월봉데이터 수신 완료')
self.DailyData = False
self.WeeklyData = False
self.MonthlyData = False
self.InvestorData = True
self.stock_analysis()
except Exception as e:
print('OnReceiveTrData_주식월봉차트조회 : ', self.종목코드, e)
if sRQName == "종목별투자자조회":
self.종목별투자자컬럼 = ['일자', '기관계', '외국인투자자', '개인투자자']
# ['일자', '현재가', '전일대비', '누적거래대금', '개인투자자', '외국인투자자', '기관계', '금융투자', '보험', '투신', '기타금융', '은행','연기금등', '국가', '내외국인', '사모펀드', '기타법인']
try:
# cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
cnt = 10
for i in range(0, cnt):
row = []
for j in self.종목별투자자컬럼:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0').replace('--', '-')
if S == '': S = '0'
row.append(S)
self.종목별투자자.append(row)
df = DataFrame(data=self.종목별투자자, columns=self.종목별투자자컬럼)
df['일자'] = df['일자'].apply(lambda x: x[0:4] + '-' + x[4:6] + '-' + x[6:])
try:
                    df.loc[df.개인투자자 == '', ['개인투자자']] = 0
                    df.loc[df.외국인투자자 == '', ['외국인투자자']] = 0
                    df.loc[df.기관계 == '', ['기관계']] = 0
except:
pass
# df.dropna(inplace=True)
df = df.sort_values(by='일자').reset_index(drop=True)
#df.to_csv('종목별투자자.csv', encoding='euc-kr')
self.UploadAnalysisData(data=df, 구분='종목별투자자')
if len(self.종목리스트) > 0:
self.종목코드 = self.종목리스트.pop(0)
QTimer.singleShot(주문지연, lambda: self.RequestInvestorDaily(_repeat=0))
else:
print('종목별투자자데이터 수신 완료')
self.end = datetime.datetime.now()
print('start :', self.start)
print('end :', self.end)
print('소요시간 :', self.end - self.start)
                    self.df_analysis = pd.merge(self.df_daily, self.df_weekly, on='종목코드', how='outer')
from cova import FEATURETABLE, GENOME, RFS, CDS, PSEQS
from cova import utils
from Bio.Data.CodonTable import unambiguous_dna_by_id as codon_table
import os, sys, pandas, math, multiprocessing, numpy
from time import time
#### Point mutations #######
def ann_pm(vpos,vseq,ft=FEATURETABLE,cdss=CDS,ct=codon_table[1],rfs=RFS):
"""
Annotate a point mutation in a given protein with the corresponding amino acid change.
Arguments:
* Mandatory
- vpos - variant's genomic position
- vseq - variant's sequence
* Optional
- ft - reference feature table with protein ids for index
- cdss - reference CDS biopython sequence records
- ct - biopython codon table
- rfs - dataframe with info on ribosomal slippage cases
Value:
A pandas dataframe with 1 row for every variant codon and following columns-
- position - 1-indexed genomic position
- var_base - variant base sequence
- protein_id - product accession of the target protein
- name - common name for the protein
- ref_codon - reference codon at the variant position
- var_codon - variant codon
- aa_change - amino acid substitution caused by the variant
"""
# initialize output dataframe
colnames = ['position','var_base','protein_id','name','ref_codon','var_codon','aa_change']
out = pandas.DataFrame( columns=colnames)
# find the affected protein(s)
prots = ft[ (ft['start'] <= vpos) & (vpos <= ft['end'])].index
# return empty if no protein is affected
if len(prots) == 0:
return out
# 0-index of the variant's genome position
genome_x = vpos-1
# for every affected protein
for p in prots:
# 0-indexed ends of the CDS
b = ft.loc[p,'start']-1
e = ft.loc[p,'end']
# corresponding CDS sequence
cds_seq = cdss[p]
# variant's index in the CDS
if p in rfs.index:
rfs_pos = rfs.loc[p,'genomic_position']
rfs_type = rfs.loc[p,'type']
if genome_x < (rfs_pos-1):
cds_x = genome_x - b
else:
cds_x = genome_x - b - rfs_type
else:
cds_x = genome_x - b
# corresponding list of codons
try:
codonls = utils.n2c(cds_seq)
except utils.LenSeqError:
print("\tinvalid CDS!")
continue
# list of amino acid variant(s)
try:
avs = utils.nv2av(p=cds_x, v=vseq, seq=codonls)
except (utils.LenSeqError,ValueError):
print('''Invalid variant {}'''.format(vpos))
continue
avs = pandas.DataFrame( [ [vpos,vseq,p,ft.loc[p,'name']]+i for i in avs], columns=colnames)
out = out.append(avs)
return out
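# Illustrative call (comment only, not from the original module; the genomic
# position and base are placeholders for any site covered by a CDS):
#
#   >>> hits = ann_pm(vpos=23403, vseq='G')
#   >>> hits[['protein_id', 'ref_codon', 'var_codon', 'aa_change']]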
def ann_pm_apply(vrow,ft=FEATURETABLE,chars='ACGT'):
# alleles at the position in the samples
alls = vrow[2:]
# alternative allelles
alts = set(alls[ alls != vrow.ref])
# only retain permissible characters
alts = { i for i in alts if all(j in chars for j in i)}
if len(alts) == 0:
print("No valid alternative alleles!")
return
alts = pandas.Series(list(alts))
vrs = alts.apply( lambda x: ann_pm(vrow.pos,x,ft))
vlist = vrs.tolist()
# if no valid variants
if len(vlist) == 0:
return
    out = pandas.concat(vlist, ignore_index=True)
    return out
#! /usr/bin/python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_selection import RFECV
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
def first_analysis():
"""
Analyzes column correlations.
"""
plt.figure(figsize=(12, 6))
sns.heatmap(train.corr(), vmin=-1, vmax=1, annot=True)
plt.show()
def further_analysis():
"""
Optional analysis. Used for visualizing other columns to optimize.
"""
print(train[['SibSp', 'Parch']].info())
train['SibSp'].value_counts().plot(kind='bar')
plt.show()
train['Parch'].value_counts().plot(kind='bar')
plt.show()
sib_pivot = train.pivot_table(index="SibSp", values="Survived")
sib_pivot.plot.bar(ylim=(0, 1), yticks=np.arange(0, 1, .1))
plt.show()
parch_pivot = train.pivot_table(index="Parch", values="Survived")
parch_pivot.plot.bar(ylim=(0, 1), yticks=np.arange(0, 1, .1))
plt.show()
explore_cols = ['SibSp', 'Parch', 'Survived']
explore = train[explore_cols].copy()
explore['family_size'] = explore[['SibSp', 'Parch']].sum(axis=1)
# Create histogram
explore['family_size'].value_counts(sort=False).plot(kind='bar')
plt.show()
family_pivot = explore.pivot_table(index=['family_size'], values="Survived")
family_pivot.plot.bar(ylim=(0, 1), yticks=np.arange(0, 1, .1))
plt.show()
def process_missing(df):
"""
Converts missing data in the dataframe to values interpretable to ML
models.
:param df: Dataframe with missing values
:return: Transformed dataframe
"""
df["Fare"] = df["Fare"].fillna(train["Fare"].mean())
df["Embarked"] = df["Embarked"].fillna("S")
return df
def process_age(df):
"""
Converts the Age column in the dataframe to pre-defined bins.
:param df: Dataframe
:return: Dataframe with Age column having pre-defined bins
"""
df["Age"] = df["Age"].fillna(-0.5)
cut_points = [-1, 0, 5, 12, 18, 35, 60, 100]
label_names = ["Missing", "Infant", "Child", "Teenager", "Young Adult",
"Adult", "Senior"]
df["Age_categories"] = pd.cut(df["Age"], cut_points, labels=label_names)
return df
def process_fare(df):
"""
Converts the Fare column into pre-defined bins.
:param df: Dataframe
:return: Dataframe with Fare column having pre-defined bins
"""
cut_points = [-1, 12, 50, 100, 1000]
label_names = ["0-12", "12-50", "50-100", "100+"]
df["Fare_categories"] = pd.cut(df["Fare"], cut_points, labels=label_names)
return df
def process_cabin(df):
"""
Converts the Cabin column into pre-defined bins.
:param df: Dataframe
:return: Dataframe with Cabin column having pre-defined bins
"""
df["Cabin_type"] = df["Cabin"].str[0]
df["Cabin_type"] = df["Cabin_type"].fillna("Unknown")
df = df.drop('Cabin', axis=1)
return df
def process_titles(df):
"""
Extracts and categorizes the title from each name entry.
:param df: Dataframe
:return: Dataframe with an additional column for a person's title
"""
titles = {
"Mr": "Mr",
"Mme": "Mrs",
"Ms": "Mrs",
"Mrs": "Mrs",
"Master": "Master",
"Mlle": "Miss",
"Miss": "Miss",
"Capt": "Officer",
"Col": "Officer",
"Major": "Officer",
"Dr": "Officer",
"Rev": "Officer",
"Jonkheer": "Royalty",
"Don": "Royalty",
"Sir": "Royalty",
"Countess": "Royalty",
"Dona": "Royalty",
"Lady": "Royalty"
}
extracted_titles = df["Name"].str.extract(r' ([A-Za-z]+)\.', expand=False)
df["Title"] = extracted_titles.map(titles)
return df
def process_family(df):
"""
Evaluates the SibSp and Parch columns to determine if passenger was
alone and assigns accordingly.
:param df: Dataframe to be transformed
:return: Transformed dataframe
"""
df.loc[(df['SibSp'] > 0) | (df['Parch'] > 0), 'isalone'] = 0
df.loc[(df['SibSp'] == 0) & (df['Parch'] == 0), 'isalone'] = 1
return df
def create_dummies(df, column_name):
"""
Creates dummy columns (one hot encoding) from a single column
:param df: Dataframe
:param column_name: Column from the dataframe
:return: Dataframe with the created dummy columns
"""
dummies = pd.get_dummies(df[column_name], prefix=column_name)
df = pd.concat([df, dummies], axis=1)
df = df.drop([column_name], axis=1)
return df
# Create a new function that combines all the functions in functions.py
def process_data(df):
"""
Cleans and processes the dataframe to be ready for use in ML models.
:param df: Original dataframe
:return: Transformed dataframe
"""
# Perform data cleaning
df = process_missing(df)
df = process_age(df)
df = process_fare(df)
df = process_titles(df)
df = process_cabin(df)
df = process_family(df)
# Create binary classifications from columns & create dummy columns
df = create_dummies(df, 'Age_categories')
df = create_dummies(df, 'Fare_categories')
df = create_dummies(df, 'Title')
df = create_dummies(df, 'Cabin_type')
df = create_dummies(df, 'Sex')
return df
def select_features(df):
"""
Selects features to use in Random Forest model.
:param df: Clean dataframe
:return: Columns predicted to provide best fit for data/predictions
"""
numeric_df = df.select_dtypes(exclude=['object'])
numeric_df = numeric_df.dropna(axis=1)
all_x = numeric_df.drop(['PassengerId', 'Survived'], axis=1).copy()
all_y = numeric_df['Survived'].copy()
rfc = RandomForestClassifier(random_state=1)
selector = RFECV(rfc, cv=10)
selector.fit(all_x, all_y)
best_columns = all_x.columns[selector.support_]
return best_columns
def select_model(df, feature_list):
"""
Provides a summary of ML models and hyperparameters that fit the
training data.
:param df: Clean dataframe
:param feature_list: List of columns to use in model
:return: Dictionary of tested ML models and hyperparameters
"""
all_x = df[feature_list]
all_y = df['Survived']
models = [{
'name': 'Logistic Regression',
'estimator': LogisticRegression(),
'hyperparameters':
{
'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
'max_iter': [1000]
}
},
{
'name': 'K-Neighbors Classifier',
'estimator': KNeighborsClassifier(),
'hyperparameters':
{
'n_neighbors': range(1, 20, 2),
'weights': ['distance', 'uniform'],
'algorithm': ['ball_tree', 'kd_tree', 'brute'],
'p': [1, 2]
}
},
{
'name': 'Random Forest Classifier',
'estimator': RandomForestClassifier(),
'hyperparameters':
{
'n_estimators': [10, 25, 50, 100],
'criterion': ['entropy', 'gini'],
'max_depth': [2, 5, 10],
'max_features': ['log2', 'sqrt'],
'min_samples_leaf': [1, 2, 3, 4, 5, 8],
'min_samples_split': [2, 3, 4, 5]
}
},
{
'name': 'Support Vector Classifier',
'estimator': SVC(),
'hyperparameters':
{
'C': [0.1, 1, 10, 100],
'kernel': ['linear', 'poly', 'rbf', 'sigmoid']
}
}
]
for model in models:
print(model['name'])
grid = GridSearchCV(model['estimator'],
param_grid=model['hyperparameters'],
cv=5)
grid.fit(all_x, all_y)
model['estimator'] = grid.best_estimator_
model['hyperparameters'] = grid.best_params_
model['score'] = grid.best_score_
print(model['hyperparameters'])
print(model['score'])
return models
def save_submission_file(model, columns, filename=None):
"""
Uses a specified ML model to predict on holdout (test) data.
Saves the results into a CSV file that can be submitted to Kaggle.
:param model: ML model
:param columns: List of columns to use in ML model
:param filename: Specified filename. Default is to use the dataframe
variable name.
:return: CSV file containing passenger ID and predicted survival
values on test data.
"""
holdout_predictions = model.predict(holdout[columns])
submission = {'PassengerId': holdout['PassengerId'],
'Survived': holdout_predictions
}
submission = pd.DataFrame(submission)
submission.to_csv(path_or_buf=filename, index=False)
return print('Save successful!')
if __name__ == '__main__':
    train = pd.read_csv('train.csv')
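    # A plausible wiring of the helpers defined above (comment only; the file
    # name 'test.csv', the output name and the choice of model are assumptions):
    #
    #   holdout = pd.read_csv('test.csv')
    #   train = process_data(train)
    #   holdout = process_data(holdout)
    #   best_cols = select_features(train)
    #   models = select_model(train, best_cols)
    #   save_submission_file(models[0]['estimator'], best_cols, 'submission.csv')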
"""
Search and recognize the name, category and
brand of a product from its description.
"""
from typing import Optional, List, Union, Dict
from itertools import combinations
import pandas as pd # type: ignore
from pymystem3 import Mystem # type: ignore
try:
from cat_model import PredictCategory # type: ignore
except ImportError:
from receipt_parser.cat_model import PredictCategory # type: ignore
# pylint: disable=C1801
def df_apply(data: pd.DataFrame, func, axis: int = 1) -> pd.DataFrame:
"""
User define the `apply` function from pd.DataFrame.
Use only for 2-column and 3-column data.
Parameters
----------
data : pd.DataFrame
The data on which the `func` function will be applied.
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default=1
Axis along which the function is applied.
Returns
-------
pd.DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
Examples
--------
>>> from pandas import DataFrame
>>> DataFrame.my_apply = df_apply
>>> df[['name', 'brand']].my_apply(foo)
"""
_cols = data.columns
_len = len(_cols)
if _len == 2:
return data.apply(lambda x: func(x[_cols[0]], x[_cols[1]]), axis=axis)
return data.apply(lambda x: func(x[_cols[0]], x[_cols[1]], x[_cols[2]]), axis=axis)
class Finder:
"""
Search and recognize the name, category and brand of a product
from its description.
Search is carried out in the collected datasets: `brands_ru.csv`,
`products.csv`, `all_clean.csv`.
Parameters
----------
pathes: Optional[Dict[str, str]], (default=None)
Dictionary with paths to required files.
Attributes
----------
mystem : Mystem
A Python wrapper of the Yandex Mystem 3.1 morphological
analyzer (http://api.yandex.ru/mystem).
See aslo `https://github.com/nlpub/pymystem3`.
cat_model: PredictCategory
Class for predicting a category by product description
using a neural network written in PyTorch.
brands_ru : np.ndarray
List of Russian brands.
products : pd.DataFrame
DataFrame of product names and categories.
all_clean : pd.DataFrame
General dataset with all product information.
data: pd.DataFrame
Text column with a description of the products to parse.
Products description should be normalized by Normalizer.
See `receipt_parser.normalize.Normalizer`.
Examples
--------
>>> product = 'Майонез MR.RICCO Провансаль 67% д/п 400'
>>> finder = Finder()
>>> finder.find_all(product)
Notes
-----
You may be comfortable with the following resource:
'https://receiptnlp.tinkoff.ru/'.
See also `receipt_parser.parsers.tinkoff`.
"""
def __init__(self, pathes: Optional[Dict[str, str]] = None):
pathes = pathes or {}
self.mystem = Mystem()
pd.DataFrame.appl = df_apply
# Init model:
model_params = {"num_class": 21, "embed_dim": 50, "vocab_size": 500}
bpe_model = pathes.get("cat_bpe_model", "models/cat_bpe_model.yttm")
cat_model = pathes.get("cat_model", "models/cat_model.pth")
self.cat_model = PredictCategory(bpe_model, cat_model, model_params)
# Read DataFrames:
brands = pathes.get("brands_ru", "data/cleaned/brands_ru.csv")
products = pathes.get("products", "data/cleaned/products.csv")
all_clean = pathes.get("all_clean", "data/cleaned/all_clean.csv")
self.brands_ru = pd.read_csv(brands)["brand"].values
self.products = pd.read_csv(products)
        self.all_clean = pd.read_csv(all_clean)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests_cache
import datetime
import pandas as pd
from datetime import timedelta
from pandas.io.common import ZipFile
from pandas.compat import BytesIO, StringIO, PY2
def main():
expire_after = timedelta(days=1)
if PY2:
filename = 'cache_py2'
else:
filename = 'cache'
session = requests_cache.CachedSession(cache_name=filename, expire_after=expire_after)
dt = pd.to_datetime("2014-01-01")
symbol = "AUD/USD"
symbol = symbol.replace("/", "").upper()
year = dt.year
month = dt.month
month_name = datetime.datetime(year=1970, month=month, day=1).strftime('%B').upper()
#url = "http://www.truefx.com/dev/data/2014/JANUARY-2014/AUDUSD-2014-01.zip"
url = "http://www.truefx.com/dev/data/{year:04d}/{month_name}-{year:04d}/{symbol}-{year:04d}-{month:02d}.zip".format(year=year, month=month, symbol=symbol, month_name=month_name)
response = session.get(url)
zip_data = BytesIO(response.content)
filename = "{symbol}-{year:04d}-{month:02d}.csv".format(year=year, month=month, symbol=symbol)
    with ZipFile(zip_data, 'r') as zf:
        # the archive holds a single CSV named after the symbol/month built above
        df = pd.read_csv(zf.open(filename), header=None)
    return df
"""By: Xiaochi (<NAME>: github.com/XC-Li"""
import pandas as pd
import os
from util_code.xml_parser import bs_parser, xml_parser, get_person_speech_pair
# from xiaodan.data_loader import get_data
from tqdm.autonotebook import tqdm # auto backend selection
# get xml data info: This function is written by <NAME>
def get_data(path):
"""
get one specific xml path
"""
all_path = []
def searchPath(path):
for item in os.listdir(path):
subFile = path + "/" + item
if os.path.isdir(subFile):
searchPath(subFile)
else:
if subFile.split('.')[-1] == "xml": # bug fix: out of range
# get all path
# path = subFile
all_path.append(subFile)
searchPath(path)
return all_path
def corpus_loader(debug=False, parser='bs', data_root='../opinion_mining/'):
"""
Corpus Loader: Match the record between action.csv and document.csv and load corpus from XML
Args:
debug(Bool): the switch for debug
parser(str): which parser to use, bs/xml
data_root(str): the root of data and labels
Returns:
Pandas DataFrame
"""
# data_root = '../opinion_mining/'
corpus_root = data_root + 'cr_corpus/'
action = pd.read_csv(data_root + 'action_data.csv')
document = pd.read_csv(data_root + 'document_data.csv')
count = match = no_match = 0
data_list = []
no_match_list = []
for index, _ in action.iterrows():
count += 1
cr_pages = action.loc[index, 'cr_pages']
support = 1 if action.loc[index, 'action_text_string'].startswith('Support') else -1
person_id = action.loc[index, 'person_id']
cr_date = action.loc[index, 'cr_date'][:10]
# print(cr_pages, support, person_id, cr_date)
first_name = action.loc[index, 'first_name']
middle_name = action.loc[index, 'middle_name']
last_name = action.loc[index, 'last_name']
full_name = first_name + '-' + str(middle_name) + '-' + last_name
party = action.loc[index, 'party_abbreviation']
chamber = action.loc[index, 'chamber']
title = action.loc[index, 'brief_title']
doc = document.loc[(document['page_range'] == cr_pages) & (document['pub_date'] == cr_date)]
if len(doc) == 0:
if debug:
print('No match', cr_pages, support, person_id, cr_date)
# doc = document.loc[(document['page_range'].str.contains(cr_pages)) & (document['pub_date'] == cr_date)]
# if len(doc) == 0:
# print('still no match')
# no_match_list.append([cr_pages, support, person_id, cr_date])
no_match += 1
continue
volume_no = doc['volume_no'].iloc[0]
issue_no = doc['issue_no'].iloc[0]
page_range = doc['page_range'].iloc[0]
# print(volume_no, issue_no, page_range)
path = corpus_root + str(volume_no) + '/' + str(issue_no) + '/' + str(page_range) + '/'
for file in os.listdir(path):
# print(i,':', person_id, ':',path+file)
if parser == 'bs':
text = bs_parser(path + file, person_id)
else:
text = xml_parser(path + file, person_id)
if len(text) > 0:
# print('match')
match += 1
data_list.append([support, text, volume_no, issue_no, page_range,
person_id, full_name, party, chamber, title])
column_name = ['support', 'text', 'volume_no', 'issue_no', 'page_range',
'person_id', 'full_name', 'party', 'chamber', 'title']
data_frame = pd.DataFrame(data_list)
data_frame.columns = column_name
print('Total:', count, 'Match:', match, 'No Match:', no_match)
# no_match_df = pd.DataFrame(no_match_list)
# no_match_df.columns = ['cr_pages', 'support', 'person_id', 'cr_date']
# no_match_df.to_csv('no_match.csv')
return data_frame
def untagged_corpus_loader(tagged_df=None, path_root='../opinion_mining'):
"""
untagged corpus loader: Load all the untagged corpus from XML files
Args:
tagged_df(Pandas DataFrame): the tagged data frame
path_root(str): the root of the path to search all XML files
Returns:
untagged_data_frame(Pandas DataFrame): untagged data frame, in the same format of tagged_df
"""
# tagged_df = corpus_loader()
if tagged_df is not None:
tagged_ids = tagged_df['volume_no'].map(str) + '/' + tagged_df['issue_no'].map(str) + '/' \
+ tagged_df['page_range'].map(str) + ':' + tagged_df['person_id'].map(str)
else:
tagged_ids = []
all_xml_path = get_data(path_root)
# print(len(all_xml_path))
untagged_data_list = []
total = untagged = 0
for file_name in tqdm(all_xml_path):
total += 1
volume_no = file_name.split('/')[-4]
issue_no = file_name.split('/')[-3]
page_range = file_name.split('/')[-2]
person_id_speech_pair = get_person_speech_pair(file_name)
for person_id in person_id_speech_pair:
unique_id = volume_no + '/' + issue_no + '/' + page_range + ':' + person_id
if unique_id not in tagged_ids:
untagged += 1
text = person_id_speech_pair[person_id]
support = full_name = party = chamber = title = 0
untagged_data_list.append([support, text, volume_no, issue_no, page_range,
person_id, full_name, party, chamber, title])
column_name = ['support', 'text', 'volume_no', 'issue_no', 'page_range',
'person_id', 'full_name', 'party', 'chamber', 'title']
    untagged_data_frame = pd.DataFrame(untagged_data_list)
    untagged_data_frame.columns = column_name
    return untagged_data_frame
# -*- coding: utf-8 -*-
# Autor: <NAME>
# Datum: Tue Sep 14 18:00:32 2021
# Python 3.8.8
# Ubuntu 20.04.1
from typing import List, Tuple
import pandas as pd
from nltk.probability import FreqDist
from nltk.tokenize.casual import TweetTokenizer
from nltk.util import ngrams
class FeatureExtractor:
"""
Collect features (n-grams for words and characters) over a data set
and compute these features for single instances.
"""
def __init__(
self,
) -> None:
self.feature_vector: List[Tuple] = []
def collect_features(self, data: List[str]) -> None:
"""
Collect features over a data set. Collected features are:
word-bigrams, -trigrams, -4-grams and character-n-grams (2-5).
Parameters
----------
data : List[str]
List of texts in training set.
Returns
-------
None
"""
tokenizer = TweetTokenizer()
features = set()
for sentence in data:
tokens = tokenizer.tokenize(sentence.lower())
features.update(set(self._extract_word_n_grams(tokens)))
features.update(set(self._extract_character_n_grams(tokens)))
self.feature_vector = list(features)
@staticmethod
def _extract_word_n_grams(tokens: List[str]) -> List[Tuple[str]]:
features = []
for i in range(1, 4):
features += ngrams(tokens, i)
return features
@staticmethod
def _extract_character_n_grams(tokens: List[str]) -> List[Tuple[str]]:
char_features = []
for token in tokens:
for i in range(2, 6):
char_features += ngrams(token, i)
return char_features
def get_features_for_instance(self, instance_text: str) -> List[int]:
"""
Apply collected features to a single instance.
Parameters
----------
instance_text : str
Text of instance to compute features for.
Returns
-------
List[int]
Feature vector for instance.
"""
tokenizer = TweetTokenizer()
tokens = tokenizer.tokenize(instance_text)
instance_features = FreqDist(
self._extract_word_n_grams(tokens) + self._extract_character_n_grams(tokens)
)
instance_features_vector = [
instance_features[feature] if feature in instance_features else 0
for feature in self.feature_vector
]
        return pd.Series(instance_features_vector)
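

# Minimal usage sketch (not part of the original module); the two training texts
# and the query string are made-up placeholders.
if __name__ == "__main__":
    corpus = ["this film was great fun", "worst movie i have ever seen"]
    extractor = FeatureExtractor()
    extractor.collect_features(corpus)
    vector = extractor.get_features_for_instance("a great film")
    # one count per collected n-gram feature; mostly zeros for a short query
    print(len(extractor.feature_vector), int(vector.sum()))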
"""
This module implements the intermediates computation
for plot(df) function.
"""
from sys import stderr
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import dask
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde, norm
from ...errors import UnreachableError
from ..dtypes import DType, is_categorical, is_numerical
from ..intermediate import Intermediate
from ..utils import to_dask
__all__ = ["compute"]
def compute(
df: Union[pd.DataFrame, dd.DataFrame],
x: Optional[str] = None,
y: Optional[str] = None,
*,
bins: int = 10,
ngroups: int = 10,
largest: bool = True,
nsubgroups: int = 5,
bandwidth: float = 1.5,
sample_size: int = 1000,
value_range: Optional[Tuple[float, float]] = None,
) -> Intermediate:
"""
Parameters:
----------
df : Union[pd.DataFrame, dd.DataFrame]
Dataframe from which plots are to be generated.
x : str, optional, default None
A valid column name from the dataframe.
y : str, optional, default None
A valid column name from the dataframe.
bins : int, default 10
For a histogram or box plot with numerical x axis, it defines
the number of equal-width bins to use when grouping.
ngroups : int, default 10
When grouping over a categorical column, it defines the
number of groups to show in the plot. Ie, the number of
bars to show in a bar chart.
largest : bool, default True
If true, when grouping over a categorical column, the groups
with the largest count will be output. If false, the groups
with the smallest count will be output.
nsubgroups : int
If x and y are categorical columns, ngroups refers to
how many groups to show from column x, and nsubgroups refers to
how many subgroups to show from column y in each group in column x.
bandwidth : float, default 1.5
Bandwidth for the kernel density estimation.
sample_size : int, default 1000
Sample size for the scatter plot.
value_range : (float, float), optional, default None
The lower and upper bounds on the range of a numerical column.
Applies when column x is specified and column y is unspecified.
Returns
-------
Intermediate
"""
# pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-return-statements,too-many-statements
# pylint: disable=no-else-return
df = to_dask(df)
orig_df_len = len(df)
if x is None and y is None:
datas: List[Any] = []
col_names_dtypes: List[Tuple[str, DType]] = []
for column in df.columns:
if is_categorical(df[column].dtype):
# bar chart
datas.append(dask.delayed(calc_bar_pie)(df[column], ngroups, largest))
col_names_dtypes.append((column, DType.Categorical))
elif is_numerical(df[column].dtype):
# histogram
datas.append(dask.delayed(calc_hist)(df[column], bins, orig_df_len))
col_names_dtypes.append((column, DType.Numerical))
else:
raise UnreachableError
datas = dask.compute(*datas)
data = [(col, dtp, dat) for (col, dtp), dat in zip(col_names_dtypes, datas)]
return Intermediate(data=data, visual_type="basic_grid")
elif (x is None) != (y is None):
col: str = cast(str, x or y)
if is_categorical(df[col].dtype):
# data for bar and pie charts
data = dask.compute(dask.delayed(calc_bar_pie)(df[col], ngroups, largest))
return Intermediate(col=col, data=data[0], visual_type="categorical_column")
elif is_numerical(df[col].dtype):
if value_range is not None:
if (
                    (value_range[0] <= np.nanmax(df[col]))
                    and (value_range[1] >= np.nanmin(df[col]))
and (value_range[0] < value_range[1])
):
df = df[df[col].between(value_range[0], value_range[1])]
else:
print("Invalid range of values for this column", file=stderr)
# qq plot
qqdata = calc_qqnorm(df[col].dropna())
# histogram
histdata = dask.compute(dask.delayed(calc_hist)(df[col], bins, orig_df_len))
# kde plot
kdedata = calc_hist_kde(df[col].dropna().values, bins, bandwidth)
# box plot
boxdata = calc_box(df[[col]].dropna(), bins)
return Intermediate(
col=col,
histdata=histdata[0],
kdedata=kdedata,
qqdata=qqdata,
boxdata=boxdata,
visual_type="numerical_column",
)
else:
raise UnreachableError
if x is not None and y is not None:
xdtype, ydtype = df[x].dtype, df[y].dtype
if (
is_categorical(xdtype)
and is_numerical(ydtype)
or is_numerical(xdtype)
and is_categorical(ydtype)
):
x, y = (x, y) if is_categorical(df[x].dtype) else (y, x)
df[x] = df[x].apply(str, meta=(x, str))
# box plot per group
boxdata = calc_box(df[[x, y]].dropna(), bins, ngroups, largest)
# histogram per group
hisdata = calc_hist_by_group(df[[x, y]].dropna(), bins, ngroups, largest)
return Intermediate(
x=x,
y=y,
boxdata=boxdata,
histdata=hisdata,
visual_type="cat_and_num_cols",
)
elif is_categorical(xdtype) and is_categorical(ydtype):
df[x] = df[x].apply(str, meta=(x, str))
df[y] = df[y].apply(str, meta=(y, str))
# nested bar chart
nesteddata = calc_nested(df[[x, y]].dropna(), ngroups, nsubgroups)
# stacked bar chart
stackdata = calc_stacked(df[[x, y]].dropna(), ngroups, nsubgroups)
# heat map
heatmapdata = calc_heatmap(df[[x, y]].dropna(), ngroups, nsubgroups)
return Intermediate(
x=x,
y=y,
nesteddata=nesteddata,
stackdata=stackdata,
heatmapdata=heatmapdata,
visual_type="two_cat_cols",
)
elif is_numerical(xdtype) and is_numerical(ydtype):
# scatter plot
scatdata = calc_scatter(df[[x, y]].dropna(), sample_size)
# hexbin plot
hexbindata = df[[x, y]].dropna().compute()
# box plot
boxdata = calc_box(df[[x, y]].dropna(), bins)
return Intermediate(
x=x,
y=y,
scatdata=scatdata,
boxdata=boxdata,
hexbindata=hexbindata,
spl_sz=sample_size,
visual_type="two_num_cols",
)
else:
raise UnreachableError
return Intermediate()
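# Illustrative call (comment only, not from the original module); `df` and the
# column names are placeholders for the caller's own data:
#
#   itmdt = compute(df, x="age", y="fare", bins=20, sample_size=500)
#   # both columns numerical -> scatter, hexbin and box plot intermediates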
def calc_bar_pie(
srs: dd.Series, ngroups: int, largest: bool
) -> Tuple[pd.DataFrame, int, float]:
"""
Calculates the group counts given a series.
Parameters
----------
srs : dd.Series
one categorical column
ngroups : int
number of groups to return
largest : bool
If true, show the groups with the largest count,
else show the groups with the smallest count
Returns
-------
Tuple[pd.DataFrame, float]
A dataframe of the group counts, the total count of groups,
and the percent of missing values
"""
miss_pct = round(srs.isna().sum() / len(srs) * 100, 1)
try:
grp_srs = srs.groupby(srs).size()
except TypeError:
srs = srs.astype(str)
grp_srs = srs.groupby(srs).size()
# select largest or smallest groups
smp_srs = grp_srs.nlargest(n=ngroups) if largest else grp_srs.nsmallest(n=ngroups)
df = smp_srs.to_frame().rename(columns={srs.name: "cnt"}).reset_index()
# add a row containing the sum of the other groups
other_cnt = len(srs) - df["cnt"].sum()
    df = df.append(pd.DataFrame({srs.name: ["Others"], "cnt": [other_cnt]}))
    return df, len(grp_srs), miss_pct
"""to create TFRecords for ML classification model training from image chips, label and class id
Author: @developmentseed
Run:
python3 tf_records_creation_classification.py \
--tile_path=data/P400_v2/ \
--csv_files=data/csv/*_class_id.csv \
--output_dir=data/classification_training_tfrecords/ \
--output_csv=data/csv/classification_training_tfrecords.csv
"""
import os
from os import makedirs, path
import io
import json
from collections import namedtuple
import pandas as pd
import numpy as np
import tensorflow as tf
from PIL import Image
import click
from smart_open import open
import glob
import random
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = (
value.numpy()
) # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _bytes_feature_label(value):
"""Returns a bytes_list from classes in int """
label = tf.convert_to_tensor([0, 1] if value else [1, 0], dtype=tf.uint8)
label = tf.io.serialize_tensor(label)
if isinstance(label, type(tf.constant(0))):
label = (
label.numpy()
) # BytesList won't unpack a string from an EagerTensor
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[label]))
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def df_chunks(df, chunk_size):
"""Returns list of dataframe, splited on max size of 500"""
list_dfs = [
df[i : (i + chunk_size)] for i in range(0, df.shape[0], chunk_size)
]
return list_dfs
def shuffle_split_train_test(df):
"""spliting training data into 70% train, 20% validation and 10% test randomly
Args:
df: pandas dataframe in "chip , label"
Returns:
df_train, df_val, df_test: train, validation and test dataframe.
"""
tiles = pd.Series(df.chip).unique()
train_b, val_b = int(len(tiles) * 0.7), int(len(tiles) * 0.2)
tile_arr = np.array(list(tiles))
np.random.shuffle(tile_arr)
train = tile_arr[:train_b]
val = tile_arr[train_b : (train_b + val_b)]
test = tile_arr[(train_b + val_b) :]
df_train = df[df.chip.isin(train.tolist())]
df_val = df[df.chip.isin(val.tolist())]
df_test = df[df.chip.isin(test.tolist())]
return df_train, df_val, df_test
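# Note: the 70/20/10 split is performed on unique chip names, so every row that
# shares a chip lands in the same partition and no chip leaks across train/val/test.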
def map_labels(df):
"""customize label classes for objects"""
image_labels = dict(zip(df.chip, df.label))
return image_labels
def image_example(image_string, label, image_shape):
feature = {
"height": _int64_feature(image_shape[0]),
"width": _int64_feature(image_shape[1]),
"depth": _int64_feature(image_shape[2]),
"label": _bytes_feature_label(label),
"image": _bytes_feature(image_string),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
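# Each serialized tf.train.Example therefore stores the raw image bytes plus its
# height/width/depth and a 2-element one-hot label ([1, 0] = not object, [0, 1] = object),
# matching the encoding in _bytes_feature_label above.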
def write_tf_file(df, record_file, labels, st_dir, image_shape, description):
"""write tf records files
Args:
df: pandas dataframe
record_file: tfrecord file name
labels: dictionary of labels
st_dir: directory to all supertiles
image_shape: shape of the images
description: description for printing on the outputs
Returns:
write the tf file
"""
print("#" * 60)
print(f"{len(df)} {description}")
print(f"{len(df[df['label']==1])} are objects")
print(f"{len(df[df['label']==0])} are not objects")
print("#" * 60)
with tf.io.TFRecordWriter(record_file) as writer:
for filename, label in labels.items():
if filename is None:
continue
try:
with open(filename, "rb") as image_file:
image = image_file.read()
except Exception as e:
print(f"Skipping '{filename}': {e}")
continue
tf_example = image_example(image, label, image_shape)
writer.write(tf_example.SerializeToString())
def write_tfrecords(df, st_dir, chunk_size, output):
"""write tfrecords for classification training
Args:
df: dataframe. chip, label
st_dir(string): directory to all supertiles
chunk_size(int): number of features to split the df and to be considered in the
TFrecords
output(string): output directory for the tfrecords
Returns:
(None): written train, val and test tfrecords.
"""
chunk_dfs = df_chunks(df, chunk_size)
base_name = "aiaia"
image_shape = (400, 400, 3)
for index, chunk_df in enumerate(chunk_dfs):
suffix = str(index + 1).zfill(3)
train_df, val_df, test_df = shuffle_split_train_test(chunk_df)
# train
record_file = os.path.join(
output, f"train_{base_name}_{suffix}.tfrecords"
)
train_labels = map_labels(train_df)
write_tf_file(
train_df,
record_file,
train_labels,
st_dir,
image_shape,
"samples as training",
)
# validation
record_file = os.path.join(
output, f"val_{base_name}_{suffix}.tfrecords"
)
val_labels = map_labels(val_df)
write_tf_file(
val_df,
record_file,
val_labels,
st_dir,
image_shape,
"samples as validation",
)
# test
record_file = os.path.join(
output, f"test_{base_name}_{suffix}.tfrecords"
)
test_labels = map_labels(test_df)
write_tf_file(
test_df,
record_file,
test_labels,
st_dir,
image_shape,
"samples as testing",
)
print("Finished writing TFRecords.")
@click.command(short_help="create tfrecords for classification training")
@click.option(
"--tile_path",
help="path to all the image tiles",
required=True,
type=str,
default="data/P400_v2/",
)
@click.option(
"--path_csv_files",
help="path csv files",
required=True,
type=str,
default="data/csv/*_class_id.csv",
)
@click.option(
"--output_dir",
help="Output path for saving tfrecords",
required=True,
type=str,
default="data/classification_training_tfrecords",
)
@click.option(
"--output_csv",
help="Output csv path",
required=True,
type=str,
default="data/csv/classification_training_tfrecords.csv",
)
def main(tile_path, path_csv_files, output_dir, output_csv):
if not path.isdir(output_dir):
makedirs(output_dir)
# # #################################################
# # # Filter unique chips
# # #################################################
csv_files = glob.glob(path_csv_files)
frames = []
for csv_file in csv_files:
df = pd.read_csv(csv_file)
prefix, _, _ = csv_file.split("/")[2].split("_")
prefix = tile_path + prefix + "_tiles/"
df["tile_id"] = prefix + df["tile_id"].astype(str)
frames.append(df)
df = | pd.concat(frames) | pandas.concat |
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
import tkinter as tk
from tkinter import ttk, scrolledtext, Menu, \
messagebox as msg, Spinbox, \
filedialog
global sol,f1Var,filePathBank,\
filePathLedger,filePathBank, \
intRad, intChk
filePathBank = ""
filePathLedger = ""
class BankReconciliation():
def __init__(self, bankDF, ledgerDF):
self.bankDF = bankDF
self.ledgerDF = ledgerDF
self.solution = {}
self.bankDF['Date'] = pd.to_datetime(bankDF['Date'])
self.ledgerDF['Date'] = | pd.to_datetime(ledgerDF['Date']) | pandas.to_datetime |
#!/usr/bin/env python
"""
MeteWIBELE: quantify_prioritization module
1) Define quantitative criteria to calculate numerical ranks and prioritize the importance of protein families
2) Prioritize the importance of protein families using unsupervised or supervised approaches
Copyright (c) 2019 Harvard School of Public Health
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os
import os.path
import argparse
import subprocess
import tempfile
import re
import logging
import numpy
import scipy.stats
import pandas as pd
from collections import namedtuple
from operator import attrgetter, itemgetter
# Try to load one of the MetaWIBELE modules to check the installation
try:
from metawibele import config
from metawibele import utilities
except ImportError:
sys.exit("CRITICAL ERROR: Unable to find the MetaWIBELE python package." +
" Please check your install.")
# name global logging instance
logger = logging.getLogger(__name__)
def parse_arguments():
"""
Parse the arguments from the user
"""
parser = argparse.ArgumentParser(
description = "MetaWIBELE-prioritize: prioritize importance of protein families based on quantitative properties\n",
formatter_class = argparse.RawTextHelpFormatter,
prog = "quantify_prioritization.py")
parser.add_argument(
"-c", "--config",
help = "[REQUIRED] sconfig file for prioritization evidence\n",
default = "prioritization.cfg",
required=True)
parser.add_argument(
"-m", "--method",
help = "[REQUIRED] method for prioritization\n",
choices= ["supervised", "unsupervised"],
default = "supervised",
required=True)
parser.add_argument(
"-r", "--ranking",
help = "[REQUIRED] approach for ranking\n",
choices= ["harmonic_mean", "arithmetic_mean", "minimal", "maximal"],
default = "harmonic_mean")
parser.add_argument(
"-w", "--weight",
help = "[REQUIRED] method for weighting: "
"[equal] specify equal weight for each evidence; "
"[correlated] specify weigh based on the pairwise correlation between evidence items;"
"[fixed] specify weigh manually in the config file\n",
choices= ["equal", "correlated", "fixed"],
default = "equal",
required=True)
parser.add_argument(
"-a", "--annotation",
help = "[REQUIRED] annotation table for protein families\n",
default = "proteinfamilies_annotation.tsv",
required=True)
parser.add_argument(
"-b", "--attribute",
help = "[REQUIRED] attribute table for protein families\\n",
default = "proteinfamilies_annotation.attribute.tsv",
required=True)
parser.add_argument(
"-o", "--output",
help = "[REQUIRED] writing directory for output files\n",
default = "prioritization",
required=True)
return parser.parse_args()
def read_config_file (conf_file, method):
"""
Collect config info for prioritization
Input: config filename
Output: evidence_conf = {DNA_prevalence:1, DNA_abundance:1, ...}
"""
config.logger.info ("Start read_config_file")
config_items = config.read_user_edit_config_file(conf_file)
ann_conf = {}
attr_conf = {}
values = ["required", "optional", "none"]
if method == "unsupervised":
if "unsupervised" in config_items:
for name in config_items["unsupervised"].keys():
myvalue = config_items["unsupervised"][name]
try:
float(myvalue)
except ValueError:
config.logger.info ("Not numberic values for the config item " + name)
continue
if myvalue.lower() == "none":
continue
if re.search("__", name):
name = re.sub("-", "_", name)
name = re.sub("\.", "_", name)
name = re.sub("\(", "_", name)
name = re.sub("\)", "", name)
attr_conf[name] = myvalue
else:
name = re.sub("-", "_", name)
name = re.sub("\.", "_", name)
name = re.sub("\(", "_", name)
name = re.sub("\)", "", name)
ann_conf[name] = myvalue
if myvalue.lower() == "required":
config.logger.info ("Required ranking item: " + name + "\t" + myvalue)
if myvalue.lower() == "optional":
config.logger.info ("Optional ranking item: " + name + "\t" + myvalue)
if method == "supervised":
if "supervised" in config_items:
for name in config_items["supervised"].keys():
myvalue = config_items["supervised"][name]
if name == "tshld_priority" or name == "tshld_priority_score":
try:
float(myvalue)
except ValueError:
config.logger.info ('Non-numeric value for the config item ' + name)
continue
else:
if not myvalue in values:
config.logger.info ("Please use valid value for the config item " + name + ": e.g. required | optional | none")
continue
if myvalue.lower() == "none":
continue
if re.search("__", name):
name = re.sub("-", "_", name)
name = re.sub("\.", "_", name)
name = re.sub("\(", "_", name)
name = re.sub("\)", "", name)
attr_conf[name] = myvalue
else:
name = re.sub("-", "_", name)
name = re.sub("\.", "_", name)
name = re.sub("\(", "_", name)
name = re.sub("\)", "", name)
ann_conf[name] = myvalue
if myvalue.lower() == "required":
config.logger.info ("Required ranking item: " + name + "\t" + myvalue)
if myvalue.lower() == "optional":
config.logger.info ("Optional ranking item: " + name + "\t" + myvalue)
config.logger.info ("Finish read_config_file")
return ann_conf, attr_conf
def read_attribute_file (attr_file, attr_conf):
"""
Collect annotation evidence for protein families used for prioritization
Input: filename of the characterization file
Output: ann = {Cluster_XYZ: {qvalue:0.001, coef:-0.3, ...}, ...}
"""
required = {}
annotation = {}
split = {}
flags = {}
titles = {}
open_file = open(attr_file, "r")
line = open_file.readline()
line = re.sub("\n$", "", line)
info = line.split("\t")
for item in info:
titles[item] = info.index(item)
for line in open_file:
line = re.sub("\n$", "", line)
if not len(line):
continue
info = line.split("\t")
myid = info[titles["AID"]]
myclust, mytype = myid.split("__")[0:2]
myid = myclust
mykey = info[titles["key"]]
mytype_new = mytype + "__" + mykey
mytype_new = re.sub("-", "_", mytype_new)
mytype_new = re.sub("\.", "_", mytype_new)
mytype_new = re.sub("\(", "_", mytype_new)
mytype_new = re.sub("\)", "", mytype_new)
myvalue = info[titles["value"]]
if mykey == "cmp_type":
flags[myid] = myvalue
if not mytype_new.lower() in attr_conf:
continue
if attr_conf[mytype_new.lower()] == "required":
required[mytype_new] = ""
if re.search("MaAsLin2", mytype) and myid in flags:
myclust = myid + "|" + flags[myid]
if not myid in split:
split[myid] = {}
split[myid][myclust] = ""
if myvalue == "NA" or myvalue == "NaN" or myvalue == "nan" or myvalue == "Nan":
continue
if not myclust in annotation:
annotation[myclust] = {}
annotation[myclust][mytype_new] = myvalue
# foreach line
open_file.close()
return annotation, split, required
def read_annotation_file (ann_file, ann_conf):
"""
Collect annotation evidence for protein families used for prioritization
Input: filename of the characterization file
Output: ann = {Cluster_XYZ: {prevalence:0.001, abundance:0.3, ...}, ...}
"""
config.logger.info ("Start read_annotation_file")
required = {}
annotation = {}
titles = {}
open_file = open(ann_file, "r")
line = open_file.readline()
line = re.sub("\n$", "", line)
info = line.split("\t")
for item in info:
titles[item] = info.index(item)
for line in open_file:
line = re.sub("\n$", "", line)
if not len(line):
continue
info = line.split("\t")
myclust = info[titles[utilities.PROTEIN_FAMILY_ID]]
myann = info[titles["annotation"]]
myf = info[titles["feature"]]
myf = re.sub("-", "_", myf)
myf = re.sub("\.", "_", myf)
myf = re.sub("\(", "_", myf)
myf = re.sub("\)", "", myf)
if myann == "NA" or myann == "NaN" or myann == "nan" or myann == "Nan":
continue
if myf.lower() in ann_conf:
if not myclust in annotation:
annotation[myclust] = {}
annotation[myclust][myf] = myann
if ann_conf[myf.lower()] == "required":
required[myf] = ""
# foreach line
open_file.close()
config.logger.info ("Finish read_annotation_file")
return annotation, required
def combine_annotation (annotation, split, required, total_ann, ann_types, required_types):
"""
Combine annotation information of protein families for prioritization
Input: ann = {Cluster_XYZ: {prevalence:0.001, abundance:0.3, ...}, ...}
attr = {Cluster_XYZ: {prevalence:0.001, abundance:0.3, ...}, ...}
split = {Cluster_XYZ:{Cluster_XYZ|A, Cluster_XYZ|B, ...}, ...}
Output: total = {Cluster_XYZ: {prevalence:0.001, abundance:0.3, ...}, ...}
"""
config.logger.info ("Start combine_annotation")
for myid in annotation.keys():
if myid in split:
for myid_new in split[myid].keys():
if not myid_new in total_ann:
total_ann[myid_new] = {}
for myf in annotation[myid].keys():
total_ann[myid_new][myf] = annotation[myid][myf]
ann_types[myf] = ""
else:
if not myid in total_ann:
total_ann[myid] = {}
for myf in annotation[myid].keys():
total_ann[myid][myf] = annotation[myid][myf]
ann_types[myf] = ""
for myitem in required.keys():
required_types[myitem] = ""
config.logger.info ("Finish combine_annotation")
def check_annotation (annotation, required_types):
"""
Select clusters with required annotation types
Input: ann = {Cluster_XYZ: {prevalence:0.001, abundance:0.3, ...}, ...}
Output: ann_new = {Cluster_abc: {prevalence:0.001, abundance:0.3, ...}, ...}
"""
# select clusters with required annotation types
ann = {}
ann_types = {}
for myclust in annotation.keys():
myflag = 0
for myitem in required_types.keys():
if not myitem in annotation[myclust]:
config.logger.info ("WARNING! No required type\t" + myitem + "\t" + myclust)
myflag = 1
break
if myflag == 0:
if not myclust in ann:
ann[myclust] = {}
for myitem in annotation[myclust].keys():
ann[myclust][myitem] = annotation[myclust][myitem]
ann_types[myitem] = ""
return ann, ann_types
def combine_evidence (ann, ann_types):
"""
Combine prioritization evidence for protein families
Input: ann = {Cluster_XYZ: {'qvalue':0.001, 'coef':-0.3, ...}, ...}
ann_types = {'qvalue', 'coef', ...}
Output: evidence_dm = {Cluster_XYZ: {'qvalue':0.001, 'coef':-0.3, 'annotation':3, ...}, ...}
"""
config.logger.info ("Start combine_evidence")
evidence_row = sorted(ann_types.keys())
metawibele_row = []
for item in evidence_row:
metawibele_row.append(item + "__value")
metawibele_row.append(item + "__percentile")
try:
evidence_table_row = namedtuple("evidence_table_row", evidence_row, verbose=False, rename=False)
except:
evidence_table_row = namedtuple("evidence_table_row", evidence_row, rename=False)
evidence_table = pd.DataFrame(index=sorted(ann.keys()), columns=evidence_table_row._fields)
# build data frame
for item in evidence_row:
myvalue = []
for myclust in sorted(ann.keys()):
if item in ann[myclust]:
myvalue.append(ann[myclust][item])
else:
# debug
#print("No item!\t" + myclust + "\t" + item)
myvalue.append("NaN")
# foreach cluster
evidence_table[item] = myvalue
# foreach evidence
config.logger.info ("Finish combine_evidence")
return evidence_table, evidence_row, metawibele_row
def get_correlated_weight (evidence_table):
"""
Calculate the pairwise correlation between evidence items and return weight table
Input: evidence_table = {family: {'abundance': abundance, 'prevalence': prevalence}}
Output: weight_conf = {'abundance': 0.5, 'prevalence': 0.5, ...}
"""
df = evidence_table
df = df.apply(pd.to_numeric, errors='coerce')
weight_conf = {}
df_corr = df.corr(method="spearman")
df_corr = abs(df_corr)
df_corr['weight'] = 1.0 / df_corr.sum(skipna=True)
for index, row in df_corr.iterrows():
weight_conf[index] = row.weight
config.logger.info (index + "\t" + str(row.weight))
return weight_conf
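# In effect each evidence item is weighted by 1 / (sum of its absolute Spearman
# correlations with every item, itself included), so a block of highly correlated
# evidence contributes roughly as much as one independent item.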
def get_equal_weight (ann_types):
"""
Calculate the equal weight and return weight table
Input: ann_types = {'abundance': '', 'prevalence': '', ...}
Output: weight_conf = {'abundance': 0.5, 'prevalence': 0.5, ...}
"""
weight_conf = {}
myweight = 1.0 / len(ann_types.keys())
for mytype in ann_types.keys():
weight_conf[mytype] = myweight
config.logger.info (mytype + "\t" + str(myweight))
return weight_conf
def get_fixed_weight (ann_types, ann_conf, attr_conf):
"""
Calculate the fixed weight and return weight table
Input: ann_types = {'abundance': '', ...}; ann_conf / attr_conf = fixed weights from the config file
Output: weight_conf = {'abundance': 0.5, 'prevalence': 0.5, ...}
"""
weight_conf = {}
for mytype in ann_types.keys():
if mytype.lower() in ann_conf:
weight_conf[mytype] = ann_conf[mytype.lower()]
# debug
config.logger.info (mytype + "\t" + str(ann_conf[mytype.lower()]))
if mytype.lower() in attr_conf:
weight_conf[mytype] = attr_conf[mytype.lower()]
config.logger.info (mytype + "\t" + str(attr_conf[mytype.lower()]))
return weight_conf
def weighted_harmonic_mean (summary_table, evidence, weight_conf, score_name):
"""
Calculate the weighted harmonic mean
Input: summary_table = {family: {'abundance': 0.5, 'prevalence': 0.8}, ...}
evidence = ['abundance', 'prevalence', ...]
weight_conf = {'abundance': 0.5, 'prevalence': 0.5, ...}
Output: summary_table = {family: {'score_name': 0.9, 'abundance_value': 0.5, 'abundance_percentile':0.9,...},...}
"""
# Weighted Harmonic mean
total_weight = 0
mytype = evidence[0]
mykey = mytype + "__percentile"
myw = float(weight_conf[mytype])
total_weight = total_weight + myw
myscore = myw / summary_table[mykey]
for mytype in evidence[1:]:
mykey = mytype + "__percentile"
if mytype in weight_conf:
myw = float(weight_conf[mytype])
total_weight = total_weight + myw
myscore = myscore + myw / summary_table[mykey]
summary_table[score_name] = float(total_weight) / myscore
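# Sketch of the computation (hypothetical numbers): with two evidence items of
# weights 1 and 2 and percentiles 10 and 20, the score is (1 + 2) / (1/10 + 2/20) = 15,
# i.e. a weighted harmonic mean of the per-item percentiles.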
def arithmetic_mean (summary_table, evidence, score_name):
"""
Calculate the Arithmetic mean
Input: summary_table = {family: {'abundance': 0.5, 'prevalence': 0.8}, ...}
evidence = ['abundance', 'prevalence', ...]
Output: summary_table = {family: {'score_name': 0.9, 'abundance_value': 0.5, 'abundance_percentile':0.9,...},...}
"""
# Arithmetic mean
total_item = 0
mytype = evidence[0]
mykey = mytype + "__percentile"
total_item = total_item + 1
myscore = summary_table[mykey]
for mytype in evidence[1:]:
mykey = mytype + "__percentile"
total_item = total_item + 1
myscore = myscore + summary_table[mykey]
summary_table[score_name] = myscore / float(total_item)
def get_rank_score (evidence_table, evidence_row, metawibele_row, weight_conf, rank_method):
"""
Return the data frame of protein families with their annotation, percentiles, and MetaWIBELE score
Input: evidence_table = {family: {'abundance': 0.5, 'prevalence': 0.8}}
weight_conf = {'abundance': 0.5, 'prevalence': 0.5, ...}
rank_method = ranking approach, e.g. harmonic_mean
Output: summary_table = {family: {'abundance_value': 0.5, 'abundance_percentiles': 0.9,...},...}
"""
config.logger.info ("Start get_rank_score")
# create a data frame
try:
metawibele_table_row = namedtuple("metawibele_table_row", metawibele_row, verbose=False, rename=False)
except:
metawibele_table_row = namedtuple("metawibele_table_row", metawibele_row, rename=False)
summary_table = pd.DataFrame(index=evidence_table.index, columns=metawibele_table_row._fields)
# calculate percentile
rank_name = []
for mytype in evidence_row:
summary_table[mytype + "__value"] = evidence_table[mytype]
summary_table[mytype + "__percentile"] = scipy.stats.rankdata(pd.to_numeric(summary_table[mytype + "__value"], errors='coerce'), method='average')
if re.search("\_coef", mytype) or re.search("\_log\_FC", mytype) or re.search("\_mean_log", mytype):
# debug
config.logger.info ("Sorting by abs(effect size), e.g. abs(coef), abs(log_FC), abs(mean_log)")
summary_table[mytype + "__percentile"] = scipy.stats.rankdata(abs( | pd.to_numeric(summary_table[mytype + "__value"], errors='coerce') | pandas.to_numeric |
#! /usr/bin/env python3
'''
HERO - Highways Enumerated by Recombination Observations
Author - <NAME>
'''
from argparse import ArgumentParser
from Bio.SeqIO import parse as BioParse
from itertools import product
import math
import multiprocessing
import os
import pandas as pd
from plotnine import *
from random import randint
import subprocess
import time
start_time = time.time()
def get_args():
parser = ArgumentParser(description='HERO - Highways Elucidated by Recombination Observations',
usage='hero.py --hero_table [table] --groups [groups_file] [options]')
parser.add_argument('--hero_table', required=True, help='HERO input table')
parser.add_argument('--groups', required=True, help='Tab-deliminated file with genomes in 1st column and groups in 2nd')
parser.add_argument('-o', '--outdir', default='hero_results', type=str, help='Output directory [hero_results]')
parser.add_argument('-c', '--cpus', default=1, type=int, help='CPUs to use [1]')
parser.add_argument('-l', '--length', default=0, type=int, help='Minimum length required to process recomb event [0]')
parser.add_argument('-b', '--bayes', default=1, type=float, help='Minimum bayes factor required to process recomb event [1]')
return parser.parse_args()
def parse_metadata(metadata_file):
''' Parse metadata into a dictionary '''
groups = {} # Key = Genome [1st column], Value = Group [2nd column]
try:
with open(metadata_file, 'r') as metafile:
for line in metafile:
line = line.strip().split()
groups[line[0]] = line[1].lower()
except FileNotFoundError:
print('Groups file {0} could not be opened. Ensure filepath is correct'.format(metadata_file))
exit(1)
return groups
def parse_table(hero_table):
'''
Parse HERO table into list of arguments
Sanity check all paths
'''
genes = []
with open(hero_table, 'r') as infile:
for line in infile:
line = line.strip().split()
if os.path.exists(line[1]) and os.path.exists(line[2]):
genes.append(line)
else:
print('Gene {0} has a bad filepath. Skipping.'.format(line[0]), flush = True)
return genes
def unpack_arguments(arg_list):
''' Unpack arguments and parse recombination '''
return parse_fastgear(arg_list[0], arg_list[1], arg_list[2])
def parse_fastgear(gene_name, fasta_path, fastgear_path):
t0 = time.time()
''' Parse recent recombination events from fastgear run '''
# Find FASTA file to parse sequence info
if not any(BioParse(fasta_path, 'fasta')):
print('{0} fasta file is bad. Removing from analysis.'.format(gene_name), flush=True)
return gene_name
# Parse FASTA file into dict
seqs_dict = {}
for record in BioParse(fasta_path, 'fasta'):
seqs_dict[record.id] = record.seq
# Setup genome class
class Genome:
strain_to_genome = {} # Key: Strain name, Value: Genome class ID
lineages = {} # Key: Lineage, Value: Strain name
def __init__(self, sequence, lineage, name):
self.name = name
self.lineage = lineage
self.sequence = sequence
# Update class dictionaries
Genome.lineages.setdefault(lineage, [])
Genome.lineages[lineage].append(name)
Genome.strain_to_genome[name] = self
# Parse lineage file and update Genome Class Dicts
try:
with open('{0}/output/lineage_information.txt'.format(fastgear_path), 'r') as fg_file:
next(fg_file)
for line in fg_file:
line = line.strip().split()
try:
seq = seqs_dict[line[3]]
except KeyError:
print('{0} could not match a sequence to ID {1}. Removing from analysis.'.format(fastgear_path, line[3]), flush=True)
return gene_name
# Add genome to Genome class
Genome(seq, line[1], line[3])
except FileNotFoundError:
return gene_name
# Parse recombination
return parse_recombination(fastgear_path, Genome, gene_name)
def parse_recombination(fastgear_run, Genome, gene_name):
''' Parse recent recombination and filter events '''
def add_event(d_lineage, s_idx, e_idx, recipient):
''' Update pair with new event and condense overlapping events '''
# Make sure pair exists
pair = d_lineage
donor_lineages.setdefault(pair, [])
# Append new index to list of events
donor_lineages[pair].append([s_idx, e_idx, [recipient]])
# Then condense events by index pairs
donor_lineages[pair].sort(key = lambda x: x[0])
merged_pairs = [] # final array to hold merged intervals
merged_recipients = []
start = -1
end = -1
for idx in range(len(donor_lineages[pair])):
cur_event = donor_lineages[pair][idx]
if cur_event[0] > end:
if idx != 0:
merged_pairs.append([start, end, merged_recipients])
merged_recipients = []
end = cur_event[1]
start = cur_event[0]
merged_recipients.extend(cur_event[2])
elif cur_event[1] >= end:
end = cur_event[1]
merged_recipients.extend(cur_event[2])
if end != -1 and [start,end] not in merged_pairs:
merged_pairs.append([start, end, merged_recipients])
donor_lineages[pair] = merged_pairs
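# add_event keeps donor_lineages[pair] as a sorted list of non-overlapping
# [start, end, recipients] intervals; e.g. (hypothetical indices) [0, 50] and
# [40, 90] collapse into [0, 90] with their recipient lists concatenated.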
# Open recent recomb file
try:
recomb_file = open('{0}/output/recombinations_recent.txt'.format(fastgear_run), 'r')
next(recomb_file)
next(recomb_file)
except FileNotFoundError:
print('{0} has an incomplete fastgear run. Removing from analysis.'.format(fastgear_run), flush=True)
return gene_name
# Find external donor lineage num for gene for filtering
external_donor = str(max([int(x) for x in Genome.lineages]) + 1)
# Logs all lineage pairs and tracks unique events
donor_lineages = {} # Key: donor lineage
# Value: List of unique events
# Get event info
for line in recomb_file:
line = line.strip().split()
s_idx, e_idx = int(line[0])-1, int(line[1]) # fastGEAR includes s_idx in the sequence, so subtract one for indexing
d_lineage, strain_name = line[2], line[5]
logbf = float(line[4])
# If minimum length or bayes not met, move on (length/bayes are global vars)
# If donor lineage is external donor, move on
fragment_len = e_idx - s_idx # fastGEAR includes the start position in its len
if fragment_len < length or (math.e**logbf) < bayes or d_lineage == external_donor:
continue
# Add event to lineage pair in dict
add_event(d_lineage, s_idx, e_idx, strain_name)
recomb_file.close() # Close recomb file
# For each unique event, find the most likely donor(s).
# Then for each unique metadata group in recipients, log an event
events = set() # All recombination events
for d_lineage in donor_lineages:
for event in donor_lineages[d_lineage]:
start, end = int(event[0]), int(event[1])
sample_recipient = event[2][0]
# All genome are expected to be roughly equal. So take the first genome
recip_seq = Genome.strain_to_genome[sample_recipient].sequence[start:end]
donor_group = find_pair(start, end, d_lineage, recip_seq, Genome)
# Fit donor group to all unique recip groups
if donor_group:
for recipient in event[2]:
recip_group = metadata[recipient]
recip_strains = [strain for strain in event[2] if metadata[strain] == recip_group]
#final_info = (donor_group, recip_group, end-start, gene_name, ','.join(recip_strains))
final_info = (donor_group, recip_group, start+1, end, gene_name, ','.join(recip_strains))
events.add(final_info)
return list(events)
def find_pair(s_idx, e_idx, d_lineage, recip_seq, Genome):
''' Try to find a metadata pair that is linked by this recombination event '''
# Step 1: See if all donors in d_lineage are from same metadata group
# NOTE:Lots of checking metadata groups here.
# I always default to other in case genome wasn't established in
# metadata parsing.
# Test donors for total consistency
donors = Genome.lineages[d_lineage]
metadata.setdefault(donors[0], 'other')
metagroup = metadata[donors[0]] # metadata dict is a global var
for donor in donors[1:]:
metadata.setdefault(donor, 'other')
cur_group = metadata[donor]
if cur_group != metagroup: # Not all donors are the same
break
else: # All donors from same group! We can move on.
return metagroup
# Step 2: Not all donors fit
# Get distance of recip seq to donor recomb fragments
# Get distance of recip seq to all donor seqs
shortest = None
viable_donors = []
for donor in donors:
donor_frag = str(Genome.strain_to_genome[donor].sequence[s_idx:e_idx])
# Calculate distance between donor and recip fragment
dist = 0
for idx, nuc in enumerate(donor_frag):
if recip_seq[idx] != nuc:
dist += 1
# Compare dist to current best dist
if not shortest: # This is the first comparison
shortest = dist
viable_donors.append(donor)
continue
# All other tests
if dist < shortest:
shortest = dist
viable_donors = [donor]
elif dist == shortest:
viable_donors.append(donor)
# Step 3 (2b?): If all likely donors from same metagroup, we win.
# Otherwise, discard the event.
metagroup = metadata[viable_donors[0]]
if len(viable_donors) > 1: # If multiple donors, check for consistency
for donor in viable_donors[1:]:
if metadata[donor] != metagroup:
return None # If two metagroups exist, kill the search
# We found a good metagroup! Send the event back
return metagroup
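# Summary: find_pair first accepts the event if every genome in the donor lineage
# shares one metadata group; otherwise it keeps only the donor(s) whose fragment has
# the smallest Hamming distance to the recipient fragment, and still returns a group
# only when those closest donors agree.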
def parse_events(recombination_events):
''' Parse events from multithreaded event finding '''
good_events = []
bad_genes = []
for gene in recombination_events:
if isinstance(gene, list): # Good events are lists, bad genes are str
for event in gene:
good_events.append(event)
else:
bad_genes.append(gene)
return good_events, bad_genes
def calculate_highway(events, unique_groups):
'''
Calculate the theshold for highways of recombination
highway = 3*IQR + Q3
IQR = Interquartile range
Q3 = Third quartile of the data
'''
recomb_events = {x:0 for x in unique_groups}
# Get all unique combinations of group pairs
for event in events:
pair = (event[0], event[1])
recomb_events.setdefault(pair, 0)
recomb_events[pair] += 1
# Calculate IQRs
recomb_counts = list(recomb_events.values())
recomb_df = pd.DataFrame({'Events': recomb_counts})
q3 = recomb_df.quantile(q=0.75)['Events']
q1 = recomb_df.quantile(q=0.25)['Events']
IQR = q3 - q1
significance_limit = q3 + (3*IQR)
return recomb_events, significance_limit
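# Worked example (hypothetical counts): pair counts [1, 2, 4, 10] give Q1 = 1.75,
# Q3 = 5.5 and IQR = 3.75, so the highway threshold is 5.5 + 3 * 3.75 = 16.75;
# only pairs exceeding that count are treated as highways downstream.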
class Metagroup:
'''
Each metadata group will be given an instance.
Tracks recombination stats for each group
'''
metagroup_dict = {} # Key: metagroup string name
# Value: Metagroup object instance
def __init__(self, name):
# Recombination variables
self.name = name
self.donations = 0 # Number of donations
self.receipts = 0 # Number of receipts
self.group_stats = {} # Key: Other metagroup object string name
# Value: [donations_to, receipts_from]
# Plotting variables
self.d_pos = 0 # Number of donations already plotted
self.r_pos = 0 # Number of receipts already plotted
def total_events(self):
return self.donations + self.receipts
def add_event(event):
'''
Parse event to add donor and receipt credit
to each metagroup in event
'''
donor, recipient = event[0], event[1]
# Make object instance for each group if not already exists
Metagroup.metagroup_dict.setdefault(donor, Metagroup(donor))
Metagroup.metagroup_dict.setdefault(recipient, Metagroup(recipient))
# Add donor/recipient credit to each group
d_group = Metagroup.metagroup_dict[donor]
r_group = Metagroup.metagroup_dict[recipient]
d_group.donations += 1
r_group.receipts += 1
d_group.group_stats.setdefault(recipient, [0, 0])
r_group.group_stats.setdefault(donor, [0, 0])
d_group.group_stats[recipient][0] += 1 # Add donor credit
r_group.group_stats[donor][1] += 1 # Add recip credit
def make_circos(events, outdir):
''' Write circos files given events and list of genomes w/ metadata '''
# Log all events in Metagroup class
for event in events:
Metagroup.add_event(event)
# Write karyotype file for circos
with open('{0}/circos_karyotype.txt'.format(outdir), 'w') as k_file:
# Get random color for each group chunk
rand_colors = random_colors(len(Metagroup.metagroup_dict.keys()))
# Write color and group to karyotype file
for idx, group in enumerate(Metagroup.metagroup_dict.values()):
color = rand_colors[idx]
k_file.write('chr - {0} {0} 0 {1} {0}\n'.format(group.name.lower(), group.total_events()))
# Write link file
with open('{0}/circos_links.txt'.format(outdir), 'w') as l_file:
# Create links by the donor
for d_group in Metagroup.metagroup_dict.values():
donor = d_group.name
# Get recipient from group_stats variable
# If donor is in the list of recipients,
# Put it on the end so it looks cleaner
recipients = list(d_group.group_stats.keys())
recipients.sort(key=donor.__eq__)
for recipient in d_group.group_stats:
donations = d_group.group_stats[recipient][0]
r_group = Metagroup.metagroup_dict[recipient]
## Write link to file
# Get donor plot range and update donor positions
d_start = d_group.d_pos
d_end = d_start + donations
d_group.d_pos += donations
# Get recipient range and update recipient positions
# All receipts should be plotted away from donations
r_start = r_group.donations + r_group.r_pos
r_end = r_start + donations
r_group.r_pos += donations
# Write to file
link = donor + ' ' + str(d_start) + ' ' + str(d_end) + ' '
link += recipient + ' ' + str(r_start) + ' ' + str(r_end) + '\n'
l_file.write(link)
# Write config_file
# Tutorial to understanding circos config file can be found at:
# circos.ca/documentation/tutorials/quick_start/
with open('{0}/circos.conf'.format(outdir), 'w') as c_file:
file_contents = 'karyotype = {0}/circos_karyotype.txt\n'.format(outdir)
# Global color scheme
file_contents += '# Global color scheme\n'
file_contents += '<colors>\n'
for idx, name in enumerate(Metagroup.metagroup_dict.keys()):
file_contents += '{0}* = {1}\n'.format(name, rand_colors[idx])
file_contents += '</colors>\n'
# Basic required content (karyotype file location, ideogram creation)
file_contents += '<ideogram>\n\n<spacing>\n'
file_contents += 'default = 0.005r # Spacing between out ring chunks\n'
file_contents += '</spacing>\n\n'
# Ideogram layout details
file_contents += '# Ideogram layout details\n'
file_contents += 'radius = 0.9r # Size of radius for outer ring\n'
file_contents += 'thickness = 80p # Thickness of outer ring\n'
file_contents += 'fill = yes # Fill chunks with color?\n'
file_contents += 'stroke_color = dgrey # Color of chunk outline\n'
file_contents += 'stroke_thickness = 2p # Thickness of outline\n\n'
# Ideogram label details
file_contents += '# Ideogram label details\n'
file_contents += 'show_label = yes # Show chunk labels?\n'
file_contents += 'label_font = default # Font of the labels\n'
file_contents += 'label_radius = 1r + 75p # Where to place labels\n'
file_contents += 'label_size = 50 # Size of the label\n'
file_contents += 'label_parallel = yes # Set label parallel to chunks\n'
file_contents += '</ideogram>\n\n'
# Tick details
# << SKIPPED FOR NOW >>
# Link details
file_contents += '# Links... The actual connections\n'
file_contents += '<links>\n<link>\n'
file_contents += 'file = {0}/circos_links.txt # The file with links to draw\n'.format(outdir)
file_contents += 'ribbon = yes # Turn links into fancy ribbons\n'
file_contents += 'flat = yes # Flatten ribbons\n'
file_contents += 'z = 1 # importance for ribbon plotting\n'
file_contents += 'radius1 = 0.8r # Push donor end of ribbon inward\n'
file_contents += 'color = eval(var(chr2)) # Default link color\n'
file_contents += 'radius = 0.98r # Where links will stop at\n'
file_contents += 'bezier_radius = 0.1r # How far from center the curves are drawn\n'
file_contents += 'thickness = 5 # Default thickness\n'
# Establish rule to color links by donor chunk
file_contents += '\n<rules>\n'
file_contents += '\nflow = continue\n\n'
file_contents += '<rule>\n'
file_contents += 'condition = 1\n'
file_contents += 'color = eval(var(chr1))\n'
file_contents += '</rule>\n<rule>\n'
file_contents += 'condition = var(interchr)\n'
file_contents += 'z = 2\n'
file_contents += '</rule>\n'
file_contents += '</rules>\n\n'
file_contents += '</link>\n</links>\n\n'
# Default circos distributions to include
file_contents += '# Default circos distributions to include\n'
file_contents += '<image>\n<<include etc/image.conf>>\n</image>\n'
file_contents += '<<include etc/colors_fonts_patterns.conf>>\n'
file_contents += '<<include etc/housekeeping.conf>>\n'
c_file.write(file_contents)
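# The generated files are meant to be rendered with the external circos tool,
# typically something like `circos -conf <outdir>/circos.conf`.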
def make_highway_circos(highway, outdir):
'''
Create 2nd circos.conf file which filters the color
of ribbons below the highway_definition threshold
'''
try:
with open('{0}/circos.conf'.format(outdir), 'r') as circos_file, open('{0}/highway_circos.conf'.format(outdir), 'w') as outfile:
for line in circos_file:
if line == '</rules>\n':
outfile.write('<rule>\n')
outfile.write('condition = (var(end1) - var(start1)) < {0}\n'.format(highway))
outfile.write('color = grey\n')
outfile.write('z = 1\n')
outfile.write('</rule>\n')
outfile.write(line)
except IOError:
print('Could not make highway circos file. Check circos.conf', flush=True)
def random_colors(num_colors):
''' Generate num_colors random colors '''
# Current optimum maximum number of groups: 51 (255//5)
colors = {k:[] for k in 'rgb'} # Dict of all R/G/B values
for color in range(num_colors): # Make each color
temp = {k: randint(0,255) for k in 'rgb'} # Get random RBG values
for k in temp:
# For each value, make sure it is at least 5 points
# different from all other values in the same position
while True:
c = temp[k]
t = set(j for j in range(c-5, c+5) if 0 <= j <= 255)
if t.intersection(colors[k]):
temp[k] = randint(0,255)
else:
break
colors[k].append(temp[k])
# Format final colors
final_colors = []
for i in range(num_colors):
final_colors.append( '{0},{1},{2}'.format(colors['r'][i], colors['g'][i], colors['b'][i]))
return final_colors
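# Example (illustrative output): random_colors(3) might return
# ['12,200,77', '190,34,150', '66,120,240'] -- comma-separated RGB strings that are
# written into the <colors> block of circos.conf above.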
def write_individual_stats(outdir, events):
'''
Write useful text files and plots for individual genome recomb data
1) Histogram of recombination fragment sizes
2) Histogram of recombination per gene
3) Histogram of recombination per recipient
'''
# Step 1: Write out fragment data and collect gene/recipient data
fragments = open('{0}/fragment_sizes.txt'.format(outdir), 'w')
recipient_counts = {}
gene_counts = {}
fragments.write('Size\n')
for event in events:
# Write out fragment now
fragments.write(str(event[3] - event[2])+'\n')
# Add 1 to the count for the gene
gene_counts.setdefault(event[4], 0)
gene_counts[event[4]] += 1
# Each genome gets 1 to its recipient count
for genome in event[5].split(','):
recipient_counts.setdefault(genome, 0)
recipient_counts[genome] += 1
fragments.close()
# Write out recipient/gene data
genes = open('{0}/gene_counts.txt'.format(outdir), 'w')
genes.write('Gene\tEvents\n')
for gene, count in gene_counts.items():
genes.write('{0}\t{1}\n'.format(str(gene), str(count)))
genes.close()
recipients = open('{0}/recipient_counts.txt'.format(outdir), 'w')
recipients.write('Recipient\tEvents\n')
for r, count in recipient_counts.items():
recipients.write('{0}\t{1}\n'.format(str(r), str(count)))
recipients.close()
# Step 2: Make each histogram
make_histogram('{0}/gene_counts.txt'.format(outdir), 'gene', '{0}/gene_counts'.format(outdir))
make_histogram('{0}/recipient_counts.txt'.format(outdir), 'recipient', '{0}/recipient_counts'.format(outdir))
make_histogram('{0}/fragment_sizes.txt'.format(outdir), 'fragment', '{0}/fragment_sizes'.format(outdir))
def make_histogram(file_loc, plot_type, filename):
'''
Make a histogram given a file location and plot type
'''
# Load in each filetype properly
if plot_type == 'gene':
datas = | pd.read_csv(file_loc, header=0, sep='\t') | pandas.read_csv |
from helper import *
import pandas as pd
import os
import glob
import re
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings("ignore")
def test_feat(cond, df, cols, p, df_u):
unseen = ''
if cond =='unseen':
unseen = 'unseen'
# cols is the feature combination
# p is for loss or latency
# 1: loss # 2 : latency
#print(df.columns)
X = df[cols]
X2 = df_u[cols]
if p == 1:
y = df.loss
y2 = df_u.loss
if p == 2:
y = df.latency
y2 = df_u.latency
# randomly split into train and test sets, test set is 80% of data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y2, test_size=0.2, random_state=1)
if unseen == 'unseen':
X_test = X2
y_test = y2
clf = DecisionTreeRegressor()
clf = clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
acc1 = mean_squared_error(y_test, y_pred)
clf2 = RandomForestRegressor(n_estimators=10)
clf2 = clf2.fit(X_train,y_train)
y_pred2 = clf2.predict(X_test)
acc2= mean_squared_error(y_test, y_pred2)
#print("Random Forest Accuracy:", acc2, '\n')
clf3 = ExtraTreesRegressor(n_estimators=10)
clf3 = clf3.fit(X_train,y_train)
y_pred3 = clf3.predict(X_test)
acc3= mean_squared_error(y_test, y_pred3)
#print("Extra Trees Accuracy:", acc3, '\n')
pca = PCA()
X_transformed = pca.fit_transform(X_train)
cl = DecisionTreeRegressor()
cl.fit(X_transformed, y_train)
newdata_transformed = pca.transform(X_test)
y_pred4 = cl.predict(newdata_transformed)
acc4 = mean_squared_error(y_test, y_pred4)
#print("PCA Accuracy:", acc4, '\n')
return [acc1, acc2, acc3, acc4 ]
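# test_feat returns the test-set MSE of four regressors in the order
# [DecisionTree, RandomForest, ExtraTrees, PCA + DecisionTree], e.g. a hypothetical
# [0.12, 0.09, 0.10, 0.15]; lower is better.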
def getAllCombinations( cond_):
lst = ['total_bytes','max_bytes','proto', "1->2Bytes",'2->1Bytes'
,'1->2Pkts','2->1Pkts','total_pkts','number_ms', 'pkt_ratio','time_spread', 'pkt sum','longest_seq'
,'total_pkt_sizes']
lst1 = ["max_bytes", "longest_seq", "total_bytes"]
lst2 = ["total_pkts", "number_ms", "byte_ratio"]
if cond_ == 1:
lst = lst1
if cond_ == 2:
lst = lst2
uniq_objs = set(lst)
combinations = []
for obj in uniq_objs:
for i in range(0,len(combinations)):
combinations.append(combinations[i].union([obj]))
combinations.append(set([obj]))
print("all combinations generated")
return combinations
def test_mse(cond, all_comb1, all_comb2):
unseen = ''
if cond =='unseen':
unseen = 'unseen'
filedir_unseen = os.path.join(os.getcwd(), "outputs", unseen + "combined_t_latency.csv")
df_unseen = pd.read_csv(filedir_unseen)
filedir = os.path.join(os.getcwd(), "outputs", "combined_t_latency.csv")
df = pd.read_csv(filedir)
all_comb1 = pd.Series(all_comb1).apply(lambda x: list(x))
all_comb2 = pd.Series(all_comb2).apply(lambda x: list(x))
dt = []
rf = []
et = []
pca = []
for i in all_comb1:
acc_loss = test_feat(cond, df, i, 1, df_unseen)
dt.append(acc_loss[0])
rf.append(acc_loss[1])
et.append(acc_loss[2])
pca.append(acc_loss[3])
dt2 = []
rf2 = []
et2 = []
pca2 = []
for i in all_comb2:
# 1 = loss
# 2 = latency
acc_latency = test_feat(cond, df, i, 2, df_unseen)
#print(accs)
dt2.append(acc_latency[0])
rf2.append(acc_latency[1])
et2.append(acc_latency[2])
pca2.append(acc_latency[3])
dict1 = pd.DataFrame({'feat': all_comb1, 'dt': dt, 'rf': rf, 'et': et, 'pca': pca})
dict2 = pd.DataFrame({'feat2': all_comb2, 'dt2': dt2, 'rf2': rf2, 'et2': et2, 'pca2': pca2})
#feat_df = pd.concat([dict1, dict2], axis=1).drop(['feat2'], axis=1)
path = os.path.join(os.getcwd() , "outputs")
dict1.to_csv(os.path.join(path, unseen + "feat_df1.csv"), index = False)
dict2.to_csv(os.path.join(path, unseen + "feat_df2.csv"), index = False)
# return feat_df
def best_performance(cond):
unseen = ''
if cond == 'unseen':
unseen = 'unseen'
#print("finding best loss performance")
filedir1 = os.path.join(os.getcwd(), "outputs", unseen + "feat_df1.csv")
df1 = pd.read_csv(filedir1)
print( "\n")
print("Loss Performance sorted from lowest to highest", "\n")
print(df1.sort_values(by=['dt', 'rf', 'et', 'pca'], ascending = True)[:5], "\n")
#print("Loss Performance sorted from highest to lowest")
#print(df1.sort_values(by=['dt', 'rf', 'et', 'pca'], ascending = False)[:5])
#print("finding best latency performance")
filedir2 = os.path.join(os.getcwd(), "outputs", unseen + "feat_df2.csv")
df2 = | pd.read_csv(filedir2) | pandas.read_csv |
import pandas as pd
import threading
import queue
import time
from itertools import combinations
from .logger import LoggerFactory
from lib.agentinfo import AgentInfoFactory
logger = LoggerFactory.getLogger(__name__)
class PolicyOptimizer():
def __init__(self, agentInfo, minAgents, depth, threads, timeout):
self.agentInfo = AgentInfoFactory.getAgentInfo(agentInfo) if agentInfo != None else None
self.minAgents = minAgents
self.depth = depth
self.threads = threads
self.timeout = timeout
self.totalQuality = 0
def optimize2(self, agentConfigurations):
(c, configurations) = self.createConfigurationMatrix(agentConfigurations)
g = pd.DataFrame()
for attribute in self.agentInfo["header"]:
g[attribute] = c.loc[f"_{attribute}_"][2:]
# loop through the groups
for name, group in g.groupby(self.agentInfo["header"]):
gc = self.filter(c, self.agentInfo["header"], list(name) if type(name) is tuple else [name])
logger.info(f"Optimizing agent group *** {name} ***")
# horizontal max
x = c[gc.iloc[:,2:].sum(axis=0).sort_values(ascending=False).index[0]]
# vertical max
y = c[gc.sum(axis=0).sort_values(ascending=False).index[0]]
# new matrix
n = gc.loc[x[x == True]][y[y == True]]
s=c[x]
s[s == True].index
def optimize(self, agentConfigurations, threshold):
baseConfigs = {}
(c, configurations) = self.createConfigurationMatrix(agentConfigurations)
if len(configurations) == 0 or len(c.columns) == 2:
logger.warn("Nothing found to optimize.")
return c, configurations, baseConfigs
if self.agentInfo != None:
# generate the group by dataframe
g = | pd.DataFrame() | pandas.DataFrame |
## Visualize results
import matplotlib.pyplot as plt
import scipy.stats as stat
import numpy as np
import pandas as pd
from collections import defaultdict
import time, os
from operator import add
## Initialize
ML = 'LogisticRegression'
nGene = 200
adj_pval_cutoff = 0.01
test_datasets = ['Auslander', 'Prat_MELANOMA', 'Riaz_pre']
controls = ['PD1', 'PD-L1', 'CTLA4', 'PD1_PD-L1_CTLA4', 'CD8T1', 'T_exhaust_Pos', 'CAF1', 'TAM_M2_M1_Pos', 'all-TME-Bio']
training_size = 0.8
## Import data & make plots
# draw AUC or AUPRC plot
def draw_AUC_AUPRC(draw_plot_for='AUC', nGene=nGene, adj_pval=adj_pval_cutoff, controls=['PD1'], ML=ML):
# output directory
plt_dir = '../../result/2_cross_study_prediction'
tmp_directories = ['%s_plots'%draw_plot_for, 'nGene_%s_adj_pval_%s'%(nGene, adj_pval)]
for tdir in tmp_directories:
if os.path.isdir('%s/%s'%(plt_dir, tdir)) == False:
os.mkdir('%s/%s'%(plt_dir, tdir))
plt_dir = '%s/%s'%(plt_dir, tdir)
# import data
df = | pd.read_csv('../../result/2_cross_study_prediction/across_study_performance.txt', sep='\t') | pandas.read_csv |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
| assert_invalid_comparison(obj, other, box_with_array) | pandas.tests.arithmetic.common.assert_invalid_comparison |
#Online References used :
#https://github.com/imadmali/movie-scraper/blob/master/MojoLinkExtract.py
#https://www.crummy.com/software/BeautifulSoup/bs4/doc/
#https://nycdatascience.com/blog/student-works/scraping-box-office-mojo/
#https://www.youtube.com/watch?v=XQgXKtPSzUI
# https://www.youtube.com/watch?v=aIPqt-OdmS0
#https://www.youtube.com/watch?v=XQgXKtPSzUI
from bs4 import BeautifulSoup
import pandas as pd
import os
import requests
import glob
import re
class WebScrapping:
def __init__(self, url, user_input):
self.url = url
self.user_input = user_input
self._scrape_data_for_movies()
def _scrape_data_for_movies(self):
file_path = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')#Build the path to the user's Desktop, where the scraped csv files will be saved
file_path = os.path.join(file_path, 'BoxOfficeMojo_4428vs')#Folder name to be created where the file will be stored
if not os.path.exists(str(file_path)):
os.mkdir(str(file_path))#If path does not exist create the path
os.chdir(file_path)#Change the directory of the file path
if len(glob.glob("*")) != 0: #The glob module finds all the pathnames matching a specified pattern according to the rules used by the Unix shell
file_list = glob.glob("*")
for file in file_list:
os.remove(file)
#The url of the BoxOffice Mojo to be scraped
url = 'https://www.boxofficemojo.com/yearly/chart/?page={}&view=releasedate&view2=domestic&yr={}&p=.htm'.format(
1, self.user_input)
pages_data = [] #List to store the pages data
total_pages = []
response = requests.get(url) # Get the response of the url after passing the user input
soup = BeautifulSoup(response.content, 'html.parser')# Using the beautiful soup library to parse the html content and format it
for page in soup.find_all('a', href=lambda href: href and "page" in href): #find the href in a tags
pages_data.append(page['href'])#append the data in the pages_data list
for page in pages_data:
if 'page' in page: #If "page" found in href
index = page.find('page')#Take the index of that page if found
if page[index:index + 6] not in total_pages:
#For extracting the total number of pages
total_pages.append(page[index:index + 6]) #for example : page=2 so in order to get the total number of pages and iterate through it it goes from 1 till end of pages for pagination
for num in range(1, len(total_pages) + 1, 1):
try:
url = 'https://www.boxofficemojo.com/yearly/chart/?page={}&view=releasedate&view2=domestic&yr={}&p=.htm'.format(
num, self.user_input) # This one works well
print("Page number {} for the year {}".format(num, self.user_input))
#Get the Response
response_from_url = requests.get(url)
html = response_from_url.text
soup = BeautifulSoup(html, 'lxml')# lxml is a pretty extensive library written for parsing XML and HTML documents very quickly
table = soup.find('table', {"cellspacing": "1"})
#Using dataframes
df = pd.read_html(str(table), skiprows=2)
df = df[0]
df = df.iloc[:, :7] # This is used to slice the dataframe to cut off the date sections.
# headers = df.dtypes.index For getting the column names
df.columns = ['rank', 'title', 'studio', 'total gross', 'total theaters', 'opening gross', 'opening theaters']
df = df[:len(df.index) - 3]
df = df[['title', 'studio', 'total gross', 'total theaters', 'opening gross', 'opening theaters']]
df['id'] = '' #Use to get the id's of the movies
id_list = []
title_list = df['title'].tolist()
for link in soup.findAll('a', {'href': re.compile("\?id=")}): # Getting the ids
id_list.append(link.get('href')) # Adding the movie id to the list
id_list = [x.split("=")[1] for x in id_list] # isolating the id 1
id_list = [x.split(".")[0] for x in id_list] # isolating the id 2
id_list = id_list[
1:] # cutting off the first entry (first entry gives the #1 box office entry for the current week).
id_dict = dict(zip(title_list, id_list))
for index in df.index:
df.loc[index, 'id'] = id_dict[df.loc[index, 'title']]#For all the indexes in the movie list
df.to_csv("{}-{}.csv".format(user_input, num), index=False)
except ValueError:
print("Please enter a valid url or a value that can be parsed")
continue
#Combine the per-page csv files
file_list = glob.glob("*.csv")
df_container = []
for file in file_list:
df = | pd.read_csv(file) | pandas.read_csv |
import pandas as pd
from IPython.display import display
from scrapy.crawler import CrawlerProcess
from ecommercecrawler.spiders.kakaoshopping import KakaoshoppingSpider
from ecommercecrawler.spiders.navershopping import NavershoppingSpider
if __name__ == "__main__":
data = {"messagetype": ["pp", "a", "pm"], "tags": ["신발", "코트", "셔츠"]}
df = | pd.DataFrame(data=data) | pandas.DataFrame |
#!/usr/bin/env python3
import sys
import os
import argparse
import pandas as pd
import glob
import datetime as dt
import math
def main():
parser = argparse.ArgumentParser(description="Preprocess reference collection: randomly select samples and write into individual files in lineage-specific directories.")
parser.add_argument('-m', '--metadata', dest='metadata', type=str, help="metadata tsv file for full sequence database")
parser.add_argument('-f', '--fasta', dest='fasta_in', type=str, help="fasta file representing full sequence database")
parser.add_argument('-k', dest='select_k', type=int, default=1000, help="randomly select 1000 sequences per lineage")
parser.add_argument('--max_N_content', type=float, default=0.01, help="remove genomes with N rate exceeding this threshold; default = 0.01 (1%)")
parser.add_argument('--country', dest='country', type=str, help="only consider sequences found in specified country")
parser.add_argument('--state', dest='state', type=str, help="only consider sequences found in specified state")
parser.add_argument('--startdate', dest='startdate', type=dt.date.fromisoformat, help="only consider sequences found on or after this date; input should be ISO format")
parser.add_argument('--enddate', dest='enddate', type=dt.date.fromisoformat, help="only consider sequences found on or before this date; input should be ISO format")
parser.add_argument('--seed', dest='seed', default=0, type=int, help="random seed for sequence selection")
parser.add_argument('-o', '--outdir', dest='outdir', type=str, default="seqs_per_lineage", help="output directory")
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
# create output directory
try:
os.mkdir(args.outdir)
except FileExistsError:
pass
# read metadata
metadata_df = read_metadata(args.metadata, args.max_N_content)
# remove duplicate sequences
metadata_df.drop_duplicates(subset=["Virus name",
"Collection date",
"Submission date"],
inplace=True,
ignore_index=True)
# extract lineage info
lineages = metadata_df["Pango lineage"].unique()
# select sequences
selection_dict = {}
lineages_with_sequence = []
for lin_id in lineages:
# create lineage directory
try:
os.mkdir("{}/{}".format(args.outdir, lin_id))
except FileExistsError:
# empty existing directory
old_files = glob.glob("{}/{}/*".format(args.outdir, lin_id))
for f_trash in old_files:
os.remove(f_trash)
# filter for lineage, country and length
samples = metadata_df.loc[metadata_df["Pango lineage"] == lin_id]
# add extra row to avoid pandas bug (https://github.com/pandas-dev/pandas/issues/35807)
samples = samples.append(pd.Series({"Location" : ". / . / ."}),
ignore_index=True)
samples[["continent", "country", "state"]] = \
samples["Location"].str.split(" / ", n=2, expand=True)
if args.country:
samples = samples.loc[samples["country"] == args.country]
else:
samples = samples.loc[samples["country"] != "."]
if args.state:
samples = samples.loc[samples["state"] == args.state]
if args.startdate:
samples = samples.loc[
samples["date"] >= pd.to_datetime(args.startdate)]
if args.enddate:
samples = samples.loc[
samples["date"] <= | pd.to_datetime(args.enddate) | pandas.to_datetime |
#### Filename: Connection.py
#### Version: v1.0
#### Author: <NAME>
#### Date: March 4, 2019
#### Description: Connect to database and get atalaia dataframe.
import psycopg2
import sys
import os
import pandas as pd
import logging
from configparser import ConfigParser
from resqdb.CheckData import CheckData
import numpy as np
import time
from multiprocessing import Process, Pool
from threading import Thread
import collections
import datetime
import csv
from dateutil.relativedelta import relativedelta
import json
class Connection():
""" The class connecting to the database and exporting the data for the Slovakia.
:param nprocess: number of processes
:type nprocess: int
:param data: the name of data (resq or atalaia)
:type data: str
"""
def __init__(self, nprocess=1, data='resq'):
start = time.time()
# Create log file in the working folder
debug = 'debug_' + datetime.datetime.now().strftime('%d-%m-%Y') + '.log'
log_file = os.path.join(os.getcwd(), debug)
logging.basicConfig(filename=log_file,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
logging.info('Connecting to datamix database!')
# Get absolute path
path = os.path.dirname(__file__)
self.database_ini = os.path.join(path, 'database.ini')
        # Read the temporary JSON file mapping Czech Protocol IDs to RES-Q report names and ESO Angels names
path = os.path.join(os.path.dirname(__file__), 'tmp', 'czech_mapping.json')
with open(path, 'r', encoding='utf-8') as json_file:
cz_names_dict = json.load(json_file)
# Set section
datamix = 'datamix-backup'
# datamix = 'datamix'
# Check which data should be exported
if data == 'resq':
# Create empty dictionary
# self.sqls = ['SELECT * from resq_mix', 'SELECT * from ivttby_mix', 'SELECT * from thailand', 'SELECT * from resq_ivttby_mix']
self.sqls = ['SELECT * from resq_mix', 'SELECT * from ivttby_mix', 'SELECT * from thailand']
# List of dataframe names
self.names = ['resq', 'ivttby', 'thailand']
elif data == 'atalaia':
self.sqls = ['SELECT * from atalaia_mix']
self.names = []
elif data == 'qasc':
self.sqls = ['SELECT * FROM qasc_mix']
self.names = []
elif data == 'africa':
self.sqls = ['SELECT * FROM africa_mix']
self.names = []
# Dictionary initialization - db dataframes
self.dictdb_df = {}
        # Dictionary initialization - prepared dataframes
self.dict_df = {}
if nprocess == 1:
if data == 'resq':
for i in range(0, len(self.names)):
df_name = self.names[i]
self.connect(self.sqls[i], datamix, nprocess, df_name=df_name)
# self.connect(self.sqls[2], datamix, nprocess, df_name='resq_ivttby_mix')
# self.resq_ivttby_mix = self.dictdb_df['resq_ivttby_mix']
# self.dictdb_df['resq_ivttby_mix'].to_csv('resq_ivttby_mix.csv', sep=',', index=False)
# if 'resq_ivttby_mix' in self.dictdb_df.keys():
# del self.dictdb_df['resq_ivttby_mix']
for k, v in self.dictdb_df.items():
self.prepare_df(df=v, name=k)
self.df = pd.DataFrame()
for i in range(0, len(self.names)):
self.df = self.df.append(self.dict_df[self.names[i]], sort=False)
logging.info("Connection: {0} dataframe has been appended to the resulting dataframe!".format(self.names[i]))
# Get all country code in dataframe
self.countries = self._get_countries(df=self.df)
# Get preprocessed data
self.preprocessed_data = self.check_data(df=self.df, nprocess=1)
self.preprocessed_data['RES-Q reports name'] = self.preprocessed_data.apply(lambda x: cz_names_dict[x['Protocol ID']]['report_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
self.preprocessed_data['ESO Angels name'] = self.preprocessed_data.apply(lambda x: cz_names_dict[x['Protocol ID']]['angels_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
##############
# ONSET TIME #
##############
self.preprocessed_data['HOSPITAL_TIME'] = pd.to_datetime(self.preprocessed_data['HOSPITAL_TIME'], format='%H:%M:%S').dt.time
try:
self.preprocessed_data['HOSPITAL_TIMESTAMP'] = self.preprocessed_data.apply(lambda x: datetime.datetime.combine(x['HOSPITAL_DATE'], x['HOSPITAL_TIME']) if not | pd.isnull(x['HOSPITAL_TIME']) | pandas.isnull |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from keras import optimizers
from keras.layers import Dense
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from pandas import ExcelFile
from pandas import ExcelWriter
from scipy import ndimage
from scipy.stats import randint as sp_randint
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn import datasets
from sklearn import metrics
from sklearn import pipeline
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.utils import resample
from tensorflow.python.framework import ops
import keras
import matplotlib.pyplot as plt
import numpy as np
import openpyxl
import pandas as pd
import scipy
import tensorflow as tf
import xlsxwriter
import numpy as np
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
import tensorflow as tf
from keras.initializers import glorot_uniform
import scipy.misc
from matplotlib.pyplot import imshow
import keras.backend as K
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
import tensorflow as tf
import keras
from keras import backend as K
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
import pandas as pd
import numpy as np
import os
from matplotlib import pyplot as plt
import keras
from sklearn.utils import shuffle
from keras.models import Sequential, Model
from keras.layers import Conv2D, MaxPooling2D, Input, GlobalMaxPooling2D
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
from sklearn.utils import shuffle
from multiprocessing import freeze_support
from sklearn import preprocessing
from rdkit import Chem
from mordred import Calculator, descriptors
from padelpy import from_smiles
from padelpy import padeldescriptor
# In[2]:
from padelpy import from_smiles
# In[4]:
trfile = open('train.csv', 'r')
line = trfile.readline()
mols_train=[]
dataY_train=[]
smiles_train=[]
for i, line in enumerate(trfile):
line = line.rstrip().split(',')
smiles = str(line[1])
smiles_train.append(smiles)
Activity = str(line[0])
mol = Chem.MolFromSmiles(smiles)
mols_train.append(mol)
dataY_train.append(Activity)
trfile.close()
dataY_train = np.array(dataY_train)
print("SMIES are extracted in list in mols_train and activity in an array dataY_train")
print('dataY_train Shape: '+str(np.shape(dataY_train)))
# In[5]:
with open('train.smi', 'w') as filehandle:
for listitem in smiles_train:
filehandle.write('%s\n' % listitem)
trfile = open('test.csv', 'r')
line = trfile.readline()
mols_test=[]
dataY_test=[]
smiles_test=[]
for i, line in enumerate(trfile):
line = line.rstrip().split(',')
smiles = str(line[1])
smiles_test.append(smiles)
Activity = str(line[0])
mol = Chem.MolFromSmiles(smiles)
mols_test.append(mol)
dataY_test.append(Activity)
trfile.close()
dataY_test = np.array(dataY_test)
print("SMIES are extracted in list in mols_test and activity in an array dataY_test")
print('dataY_test Shape: '+str(np.shape(dataY_test)))
# In[6]:
with open('test.smi', 'w') as filehandle:
for listitem in smiles_test:
filehandle.write('%s\n' % listitem)
padeldescriptor(mol_dir='test.smi',d_2d=True, d_3d=False,fingerprints=False, removesalt=True, retainorder=True,
d_file='test_2D.csv',
maxruntime=100000, threads=1)
# In[3]:
padeldescriptor(mol_dir='train.smi',d_2d=True, d_3d=False,fingerprints=False, removesalt=True, retainorder=True,
d_file='train_2D.csv',
maxruntime=100000, threads=1)
dataX_train=pd.read_csv('train_2D.csv')
# In[7]:
print(np.shape(dataX_train))
# In[8]:
dataX_test=pd.read_csv('test_2D.csv')
# In[9]:
print(np.shape(dataX_test))
# In[10]:
dataX=pd.concat([dataX_train,dataX_test])
# In[11]:
print(np.shape(dataX))
# In[12]:
dataX
# In[ ]:
# In[13]:
#This function gets the raw data and clean it
def data_clean(data):
print("Data shape before cleaning:" + str(np.shape(data)))
#Change the data type of any column if necessary.
print("Now it will print only those columns with non-numeric values")
print(data.select_dtypes(exclude=[np.number]))
#Now dropping those columns with zero values entirely or which sums to zero
data= data.loc[:, (data != 0).any(axis=0)]
#Now dropping those columns with NAN values entirely
data=data.dropna(axis=1, how='all')
data=data.dropna(axis=0, how='all')
    #Keep track of the columns which are excluded by the NaN and zero-sum column operations above
print("Data shape after cleaning:" + str(np.shape(data)))
return data
# In[14]:
dataX= data_clean(dataX)
# In[15]:
#This function impute the missing values with features (column mean)
def data_impute(data):
    #Separating out the NAME column of the molecules because it is not one of the features to be imputed/normalized.
data_input=data.drop(['Name'], axis=1)
data_names = data.Name
#Imputing the missing values with features mean values
fill_NaN = Imputer(missing_values=np.nan, strategy='mean', axis=0)
Imputed_Data_input = pd.DataFrame(fill_NaN.fit_transform(data_input))
Imputed_Data_input.index = data_input.index
print(np.shape(Imputed_Data_input))
print("Data shape after imputation:" + str(np.shape(Imputed_Data_input)))
nanmask = np.isnan(fill_NaN.statistics_)
print(nanmask)
return Imputed_Data_input, data_names
# In[16]:
Imputed_Data_input, data_names=data_impute(dataX)
# In[17]:
print(np.shape(Imputed_Data_input))
# In[18]:
Imputed_Data_input
# In[19]:
#This function is to normalize features
def data_norm(Imputed_Data_input):
    #Calculating the mean and STD of the imputed input data set
Imputed_Data_input_mean=Imputed_Data_input.mean()
Imputed_Data_input_std=Imputed_Data_input.std()
#z-score normalizing the whole input data:
Imputed_Data_input_norm = (Imputed_Data_input - Imputed_Data_input_mean)/Imputed_Data_input_std
#Adding names and labels to the data again
#frames = [data_names,data_labels, Imputed_Data_input_norm]
#full_data_norm = pd.concat(frames,axis=1)
return Imputed_Data_input_norm
# In[20]:
full_data_norm=data_norm(Imputed_Data_input)
# In[21]:
full_data_norm
# In[22]:
print(np.shape(dataX_train))
# In[23]:
dataX_train=full_data_norm[0:dataX_train.shape[0]]
# In[24]:
dataX_test=full_data_norm[dataX_train.shape[0]:]
# In[25]:
dataX_test
# In[ ]:
# In[26]:
print(np.shape(dataX_train))
# In[27]:
print(np.shape(dataX_test))
# In[28]:
desc_number=dataX_train.shape[1]
# In[29]:
X = tf.placeholder(tf.float32, [None, desc_number])
Y = tf.placeholder(tf.float64, [None, 1])
# In[30]:
py_x =keras.layers.Dense(1000, kernel_initializer ='glorot_normal', activation='sigmoid')(X)
py_x = keras.layers.Dropout(0.7)(py_x)
py_x =keras.layers.Dense(1000, activation='sigmoid')(py_x )
py_x = keras.layers.Dropout(0.5)(py_x)
py_x =keras.layers.Dense(1000, activation='relu')(py_x )
py_x = keras.layers.Dropout(0.5)(py_x)
py_x =keras.layers.Dense(1000, activation='sigmoid')(py_x )
py_x = keras.layers.Dropout(0.5)(py_x)
py_x =keras.layers.Dense(1000, activation='relu')(py_x )
py_x = keras.layers.Dropout(0.5)(py_x)
py_x =keras.layers.Dense(1000, activation='sigmoid')(py_x )
py_x = keras.layers.Dropout(0.5)(py_x)
py_x =keras.layers.Dense(1000, activation='relu')(py_x )
py_x = keras.layers.Dropout(0.5)(py_x)
py_x =keras.layers.Dense(1000, activation='sigmoid')(py_x )
py_x = keras.layers.Dropout(0.5)(py_x)
py_x =keras.layers.Dense(1000, activation='sigmoid')(py_x )
py_x = keras.layers.Dropout(0.5)(py_x)
py_x =keras.layers.Dense(1000, activation='sigmoid')(py_x )
py_x = keras.layers.Dropout(0.5)(py_x)
py_x =keras.layers.Dense(1000, activation='relu')(py_x )
py_x = keras.layers.Dropout(0.5)(py_x)
py_x =keras.layers.Dense(10, activation='relu')(py_x )
py_x = keras.layers.Dropout(0.2)(py_x)
py_x1 = keras.layers.Dense(1, activation='linear')(py_x)
# In[ ]:
# In[31]:
cost1 = tf.losses.mean_squared_error(labels=Y, predictions=py_x1)
# In[45]:
train_op1 = tf.train.AdamOptimizer(learning_rate = 5e-6).minimize(cost1)
# In[46]:
prediction_error1 = tf.sqrt(cost1)
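# prediction_error1 is the RMSE, i.e. sqrt of the mean squared error (cost1), so it is reported
# in the same units as the LD50 target values.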
# In[47]:
import tensorflow as tf
# In[48]:
data_x_train=dataX_train
data_y_train=dataY_train
data_x_test=dataX_test
data_y_test=dataY_test
# In[49]:
print(np.shape(data_y_test))
# In[50]:
data_y_test.shape[0]
# In[51]:
data_y_test = (np.array(dataY_test, dtype=np.float32)).reshape(dataY_test.shape[0],1)
# In[52]:
data_y_train = (np.array(dataY_train, dtype=np.float32)).reshape(dataY_train.shape[0],1)
# In[53]:
batch_size = 32
# In[54]:
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
SAVER_DIR = "model_ld50"
saver = tf.train.Saver()
ckpt_path = os.path.join(SAVER_DIR, "model_ld50")
ckpt = tf.train.get_checkpoint_state(SAVER_DIR)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
best_rmse = 10
best_idx = 0
LD50_R2_train = []
#LD50_R2_valid = []
LD50_R2_test = []
LD50_RMSE_train = []
#LD50_RMSE_valid = []
LD50_RMSE_test = []
LD50_MAE_train = []
#LD50_MAE_valid = []
LD50_MAE_test = []
steps=[]
for i in range(1000):
steps.append(i)
training_batch = zip(range(0, len(data_x_train), batch_size),
range(batch_size, len(data_x_train)+1, batch_size))
#for start, end in tqdm.tqdm(training_batch):
for start, end in training_batch:
sess.run(train_op1, feed_dict={X: data_x_train[start:end], Y: data_y_train[start:end]})
merr_train_1 = sess.run(prediction_error1, feed_dict={X: data_x_train, Y: data_y_train})
print('Epoch Number: '+str(i))
print('RMSE_Train: '+str(merr_train_1))
LD50_RMSE_train.append(merr_train_1)
train_preds1 = sess.run(py_x1, feed_dict={X: data_x_train})
train_r1 = r2_score(data_y_train, train_preds1)
train_mae = mean_absolute_error(data_y_train, train_preds1)
print('R^2_Train: '+str(train_r1))
LD50_R2_train.append(train_r1)
print('MAE_Train: '+str(train_mae))
LD50_MAE_train.append(train_mae)
print(" ")
merr_test_1 = sess.run(prediction_error1, feed_dict={X: data_x_test, Y: data_y_test})
print('Epoch Number: '+str(i))
print('RMSE_test: '+str(merr_test_1))
LD50_RMSE_test.append(merr_test_1)
test_preds1 = sess.run(py_x1, feed_dict={X: data_x_test})
test_r1 = r2_score(data_y_test, test_preds1)
test_mae = mean_absolute_error(data_y_test, test_preds1)
print('R^2_test: '+str(test_r1))
LD50_R2_test.append(test_r1)
print('MAE_test: '+str(test_mae))
LD50_MAE_test.append(test_mae)
print(" ")
if best_rmse > merr_test_1:
best_idx = i
best_rmse = merr_test_1
save_path = saver.save(sess, ckpt_path)
print('model saved!')
print("###########################################################################")
# In[55]:
####################################################################
#=========================== test part ============================#
####################################################################
saver = tf.train.Saver()
ckpt_path = os.path.join(SAVER_DIR, "model_FCPC")
ckpt = tf.train.get_checkpoint_state(SAVER_DIR)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, ckpt.model_checkpoint_path)
print("model loaded successfully!")
test_rmse = sess.run(prediction_error1, feed_dict={X: data_x_test, Y: data_y_test})
print('RMSE of the test after loading the best model: '+str(test_rmse))
test_preds = sess.run(py_x1, feed_dict={X: data_x_test})
test_r = r2_score(data_y_test, test_preds)
test_mae = mean_absolute_error(data_y_test, test_preds)
print('R^2_test after loading the best model: '+str(test_r))
print('MAE_test after loading the best model: '+str(test_mae))
print(test_preds)
test_preds = | pd.DataFrame(test_preds) | pandas.DataFrame |
import argparse
import os
import warnings
import subprocess
subprocess.call(['pip', 'install', 'sagemaker-experiments'])
import pandas as pd
import numpy as np
import tarfile
from smexperiments.tracker import Tracker
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.compose import make_column_transformer
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
columns = ["status","duration","credit_history","purpose","amount","savings","employment_duration","installment_rate","personal_status_sex","other_debtors","present_residence","property","age","other_installment_plans","housing","number_credits","job","people_liable","telephone","foreign_worker","credit_risk"]
if __name__=='__main__':
# Read the arguments passed to the script.
parser = argparse.ArgumentParser()
parser.add_argument('--train-test-split-ratio', type=float, default=0.3)
args, _ = parser.parse_known_args()
# Tracking specific parameter value during job.
tracker = Tracker.load()
tracker.log_parameter('train-test-split-ratio', args.train_test_split_ratio)
print('Received arguments {}'.format(args))
# Read input data into a Pandas dataframe.
input_data_path = os.path.join('/opt/ml/processing/input', 'SouthGermanCredit.txt')
print('Reading input data from {}'.format(input_data_path))
df = pd.read_csv(input_data_path,names=columns,header=0,sep=r' ')
df.columns = columns
# Defining one-hot encoders.
transformer = make_column_transformer(
(['credit_history', 'purpose','personal_status_sex','other_debtors','property','other_installment_plans','housing','job','telephone','foreign_worker'], OneHotEncoder(sparse=False)), remainder="passthrough"
)
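    # The column transformer one-hot encodes the categorical columns listed above and passes the
    # remaining (numeric) columns through unchanged ("passthrough").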
X = df.drop('credit_risk', axis=1)
y = df['credit_risk']
featurizer_model = transformer.fit(X)
features = featurizer_model.transform(X)
labels = LabelEncoder().fit_transform(y)
# Splitting.
split_ratio = args.train_test_split_ratio
print('Splitting data into train and validation sets with ratio {}'.format(split_ratio))
X_train, X_val, y_train, y_val = train_test_split(features, labels, test_size=split_ratio, random_state=0)
print('Train features shape after preprocessing: {}'.format(X_train.shape))
print('Validation features shape after preprocessing: {}'.format(X_val.shape))
# Saving outputs.
train_features_output_path = os.path.join('/opt/ml/processing/train', 'train_features.csv')
train_labels_output_path = os.path.join('/opt/ml/processing/train', 'train_labels.csv')
val_features_output_path = os.path.join('/opt/ml/processing/val', 'val_features.csv')
val_labels_output_path = os.path.join('/opt/ml/processing/val', 'val_labels.csv')
print('Saving training features to {}'.format(train_features_output_path))
| pd.DataFrame(X_train) | pandas.DataFrame |
import pandas as pd
import numpy as np
import zipfile
import os
import scipy as sp
import matplotlib.pyplot as plt
import plotly.express as px
import zipfile
import pathlib
def top_ions(col_id_unique):
""" function to compute the top species, top filename and top species/plant part for each ion
Args:
df1 = reduced_df, table of with index on sp/part column and features only.
df2 = quantitative.csv file, output from MZmine
Returns:
None
"""
#computes the % for each feature
dfA = pd.read_csv('../data_out/reduced_df.tsv', sep='\t', index_col=[0])
dfA = dfA.copy().transpose()
dfA = dfA.div(dfA.sum(axis=1), axis=0)
dfA.reset_index(inplace=True)
dfA.rename(columns={'index': 'row ID'}, inplace=True)
dfA.set_index('row ID', inplace=True)
dfA = dfA.astype(float)
dfA['Feature_specificity'] = dfA.apply(lambda s: s.abs().nlargest(1).sum(), axis=1)
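    # Feature_specificity = the largest relative intensity share of the feature across all samples after
    # the row-wise normalisation above; a value close to 1 means the ion is essentially sample-specific.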
dfA.reset_index(inplace=True)
#df1 = df1.drop([0], axis=1)
dfA = dfA[['row ID', 'Feature_specificity']]
dfA['row ID']=dfA['row ID'].astype(int)
#computes the top filename for each ion
df2 = pd.read_csv('../data_out/quant_df.tsv', sep='\t', index_col=[0])
df2 = df2.div(df2.sum(axis=1), axis=0)
df2 = df2.copy()
df2 = df2.astype(float)
df2 = df2.apply(lambda s: s.abs().nlargest(1).index.tolist(), axis=1)
df2 = df2.to_frame()
df2['filename'] = pd.DataFrame(df2[0].values.tolist(), index= df2.index)
df2 = df2.drop([0], axis=1)
df = pd.merge(left=dfA,right=df2, how='left',on='row ID')
if col_id_unique != 'filename':
#computes the top species/part for each feature
df3 = pd.read_csv('../data_out/reduced_df.tsv', sep='\t', index_col=[0])
df3 = df3.transpose()
df3 = df3.astype(float)
df3 = df3.apply(lambda s: s.abs().nlargest(1).index.tolist(), axis=1)
df3 = df3.to_frame()
df3[[col_id_unique]] = pd.DataFrame(df3[0].values.tolist(),index= df3.index)
df3 = df3.drop([0], axis=1)
df3.reset_index(inplace=True)
df3.rename(columns={'index': 'row ID'}, inplace=True)
df3['row ID'] = df3['row ID'].astype(int)
#merge all the data
df = pd.merge(left=df3, right=df, how='left', on='row ID')
else:
df
df.to_csv('../data_out/specificity_df.tsv', sep='\t')
return df
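# Example usage (illustrative; the column name is an assumption, not from the source):
#   specificity_df = top_ions(col_id_unique='ATTRIBUTE_Species')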
def annotations(df2, df3,
sirius_annotations, isbd_annotations,
min_score_final, min_ConfidenceScore, min_ZodiacScore):
"""
function to check the presence of annotations by feature in the combined information form gnps &/ in silico
Args:
df1 = annot_gnps_df # mandatory
df2 = tima_results_filename
df3 = sirius_annotations_filename
only_ms2_annotations =
sirius_annotations =
isbd_annotations =
min_score_final =
min_ConfidenceScore =
min_ZodiacScore =
Returns:
        df = per-feature annotation table (also written to ../data_out/annotations_df.tsv)
"""
#ONLY GNPS
#find null values (non annotated)
df1 = pd.read_csv('../data_out/annot_gnps_df.tsv', sep='\t').drop(['Unnamed: 0'],axis=1)
df = df1.copy()
df['Annotated'] = pd.isnull(df['Consol_InChI'])
#lets replace the booleans
bD = {True: '0', False: '1'}
df['Annotated_GNPS'] = df['Annotated'].replace(bD)
#reduced
df = df[['cluster index', 'componentindex', 'Annotated_GNPS']]
df = df.fillna({'Annotated_GNPS':0})
if isbd_annotations == True:
# work on df2 (isdb annotations)
df2 = pd.merge(left=df1[['cluster index']],
right=df2,
how='left', left_on= 'cluster index', right_on='feature_id')
#recover one value from multiple options:
df2['score_final'] = df2['score_final'].str.split('|').str[-1].astype(float)
df2['lib_type'] = df2['score_initialNormalized'].str.split('|').str[-1].astype(float)
df2.drop('score_initialNormalized', axis=1, inplace=True)
df2['molecular_formula'] = df2['molecular_formula'].str.split('|').str[-1].astype(str)
def score_final_isdb(final_score):
if final_score >= min_score_final:
annotated=1 #good annotation
else:
annotated=0 #'bad annotation'
return annotated
df2['Annotated_ISDB'] = df2.apply(lambda x: score_final_isdb(x['score_final']), axis=1)
df2.loc[df2['lib_type']== 'MS1_match', 'Annotated_ISDB'] = 0
#merge the information
df = pd.merge(left=df, right=df2[['cluster index','Annotated_ISDB']],
how='left', on= 'cluster index')
else:
df
if sirius_annotations == True:
# work on df3 (sirius annotations)
#get the feature id
df3['shared name'] = df3['id'].str.split('_').str[-1].astype(int)
df3 = pd.merge(left=df1[['cluster index']],
right=df3[['shared name','ConfidenceScore','ZodiacScore']],
how='left', left_on= 'cluster index', right_on='shared name')
df3['ConfidenceScore'] = df3['ConfidenceScore'].fillna(0)
def Sirius_annotation(ConfidenceScore, ZodiacScore):
if ConfidenceScore >= min_ConfidenceScore and ZodiacScore >= min_ZodiacScore:
annotated=1 #good annotation
else:
annotated=0 #'bad annotation'
return annotated
df3['Annotated_Sirius'] = df3.apply(lambda x: Sirius_annotation(x['ConfidenceScore'], x['ZodiacScore']), axis=1)
#df3.head(2)
#merge the information
df = pd.merge(left=df, right=df3[['cluster index','Annotated_Sirius']],
how='left',on= 'cluster index')
else:
df
def annotations_gnps(df):
""" function to classify the annotations results
Args:
df = treated and combinend table with the gnps and insilico results
Returns:
None
"""
if isbd_annotations == True and sirius_annotations == True:
if (df['Annotated_GNPS'] == '1') | (df['Annotated_ISDB'] == '1') | (df['Annotated_Sirius'] == '1'):
return 1
else:
return 0
elif isbd_annotations == True and sirius_annotations == False:
if (df['Annotated_GNPS'] == '1') | (df['Annotated_ISDB'] == '1'):
return 1
else:
return 0
elif isbd_annotations == False and sirius_annotations == True:
if (df['Annotated_GNPS'] == '1') | (df['Annotated_Sirius'] == '1'):
return 1
else:
return 0
else:
if (df['Annotated_GNPS'] == '1'):
return 1
else:
return 0
df['annotation'] = df.apply(annotations_gnps, axis=1)
df.to_csv('../data_out/annotations_df.tsv', sep='\t')
return df
def mf_rate(df, sirius_annotations, min_ZodiacScore, min_specificity, annotation_preference):
""" function to calculate a rate of non annotated specific features with a predicte MF of good quality
Args:
df = annotations from Sirius
Returns: dataframe with the rate
None
"""
if sirius_annotations == True:
df1 = pd.read_csv('../data_out/annot_gnps_df.tsv', sep='\t').drop(['Unnamed: 0'],axis=1)
df2 = df.copy()
df2['shared name'] = df2['id'].str.split('_').str[-1].astype(int)
df3 = pd.read_csv('../data_out/specificity_df.tsv', sep='\t').drop(['Unnamed: 0'],axis=1)
df4 = pd.read_csv('../data_out/annotations_df.tsv', sep='\t').drop(['Unnamed: 0'],axis=1)
df5 = | pd.merge(left=df1[['cluster index']],right=df2[['shared name','ZodiacScore']], how='left', left_on= 'cluster index', right_on='shared name') | pandas.merge |
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import pymysql
import os
import io
#from werkzeug.utils import secure_filename
from pulp import *
import numpy as np
import pymysql
import pymysql.cursors
from pandas.io import sql
#from sqlalchemy import create_engine
import pandas as pd
import numpy as np
#import io
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#from flask import Flask, render_template, request, redirect, url_for, session, g
from sklearn.linear_model import LogisticRegression
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
from sqlalchemy import create_engine  # needed below for Status.to_sql in the /gmap route
from collections import defaultdict
from sklearn import linear_model
import statsmodels.api as sm
import scipy.stats as st
import pandas as pd
import numpy as np
from pulp import *
import pymysql
import math
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
            changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
pint=[]
dint=[]
x = spq['Product_Price']
num_bins = 5
        n, pint = np.histogram(x, num_bins)  # price bin edges (replaces the commented-out plt.hist call so pint is actually populated)
y = spq['Product_Qty']
num_bins = 5
        n, dint = np.histogram(y, num_bins)  # demand bin edges (replaces the commented-out plt.hist call so dint is actually populated)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
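        # arr[i][j] now counts the weeks whose price falls in price bin i and whose quantity falls
        # in demand bin j; this is the price/demand matrix rendered as a table below.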
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
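        # Fitted model: Product_Qty = b0 + b1*(Price - Comp_Price) + b2*Promo1 + b3*Promo2 + b4*log(week)
        # (the Week column was built above as np.log of the week index); b1 captures price sensitivity.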
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
                          (diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(week_param*Modeldata['Week'].iloc[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1,promo2_param=promo2,comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
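        # Demand model used throughout this route:
        #   D_t(p) = a + b*(p - comp_t) + d*t + pr1*promo1 + pr2*promo2
        # with a, b, d, pr1, pr2 taken from the regression fitted in the /elasticity route.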
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
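        # constraint_1 keeps total demand over the horizon within the starting inventory s_0;
        # constraint_2 keeps the weekly prices non-negative. Both are inequality constraints for SLSQP.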
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1,promo2_param, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('Sub-grouping rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
        return render_template('warning.html',alert='Infeasible due to insufficient load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1, join_axes=[dear.index])
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
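    # Haversine great-circle distance: a = sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2),
    # distance = 2*R*atan2(sqrt(a), sqrt(1-a)) with R = 6373 km, then multiplied by the per-km cost.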
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity] + 5000000*cap_slack[cust] for cust in Demand)
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
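# factoryalloc() reconstructs the solution from model.variables(): the leading block of
# values is reshaped into a factory x customer shipment table, and the slice just before the
# factory_status values holds the cap_slack (unsatisfied demand) entries. The slicing offsets
# therefore rely on the order in which PuLP returns the variables.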
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
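# Quarterly aggregation: every date is mapped to the first month of its quarter
# (Jan/Apr/Jul/Oct) and the rows are then summed per quarter via groupby('Date').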
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
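# Yearly aggregation: every date is mapped to 'YYYY-01-01' and the rows are summed per year.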
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weekly': ##weekly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
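# Chronological 70/30 split: D (first 70%) is used to fit each model, while V (last 30%)
# is kept as a hold-out block for the regression accuracy figures below.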
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
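# Error metrics reported per model (as implemented above):
#   ME   = mean(y_true - y_pred)                 (bias)
#   MAE  = mean(|y_true - y_pred|)
#   MAPE = mean(|y_true - y_pred| / y_pred) * 100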
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
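# The 'Moving Average' option is implemented as an MA(1) model, i.e. ARIMA(0,0,1), fitted to
# each demand column and predicted from the last training date up to the user-supplied 'till'
# date. Error metrics come from the in-sample fit of the TotalDemand column (i == 0);
# ratio_inc is the period-over-period % change of the last forecast series left by the loop.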
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
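# ARIMA option: an AR(1) model, ARIMA(1,0,0), fitted per column over the same horizon as the
# moving-average case; in-sample errors are again taken from the TotalDemand column.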
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
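# Trend regression: for each demand column a LinearRegression is fitted with the integer time
# index as the only feature, scored on the hold-out block V, and then extrapolated
# 'noofterms' periods beyond the end of the training data.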
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting the list of attributes in the Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the regression for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model on the hold-out set
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
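# Simple exponential smoothing with alpha = 0.5:
#   s[t] = alpha * x[t] + (1 - alpha) * s[t-1]
# The last smoothed value is then rolled forward to produce the out-of-sample forecasts.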
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
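# Build the summary table shown on the page: one 'Date' column plus one TotalDemand column
# per selected model, read back from the forecastoutput table just written.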
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
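# Same pipeline as monthlyforecast(), but run on the quarter-aggregated data: results go to
# forecastoutputq/summaryoutputq and the forecast date ranges use a 3-month frequency.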
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the quarterly data and index it by time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Quarterly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting the list of attributes in the Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the regression for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model on the hold-out set
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputq',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputq`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('quarterly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('quarterly.html',sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##yearly
@app.route("/yearlyforecast",methods = ['GET','POST'])
def yearlyforecast():
data = pd.DataFrame(Yeardata)
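# Same pipeline as monthlyforecast(), but run on the year-aggregated data: results go to
# forecastoutputy/summaryoutputy and the forecast date ranges use an annual frequency.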
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/12
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date']
vari=[]
for var in tdf:
vari.append(var[:4])
tres11 = vari
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/12
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputy`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputy`")
con.commit()
sql = "INSERT INTO `forecastoutputy` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the yearly data and index it by time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
dindex=(tdfs.index).strftime("20%y")
tdfs['Date']=(dindex)
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Yearly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='A')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='A', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting the list of attributes in the Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the regression for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model on the hold-out set
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputy',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputy`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('yearly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('yearly.html',sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
#############################Dashboard#######################################
#yearly
@app.route('/youtgraph', methods = ['GET','POST'])
def youtgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputy` GROUP BY `Model`")
sfile=cur.fetchall()
global yqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
yqst=qlst.values
con.close()
return render_template('ydashboard.html',qulist=yqst)
@app.route('/youtgraph1', methods = ['GET', 'POST'])
def youtgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputy` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date']
index=np.concatenate((indx,edata['Date'].values),axis=0)
yindx=[]
for var in index:
var1 = var[:4]
yindx.append(var1)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('ydashboard.html',mon=value,qulist=yqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=yindx,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#monthly
@app.route('/moutgraph', methods = ['GET','POST'])
def moutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutput` GROUP BY `Model`")
sfile=cur.fetchall()
global mqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
mqst=qlst.values
con.close()
return render_template('mdashboard.html',qulist=mqst)
@app.route('/moutgraph1', methods = ['GET', 'POST'])
def moutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutput` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('mdashboard.html',mon=value,qulist=mqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#quarterly
@app.route('/qoutgraph', methods = ['GET','POST'])
def qoutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputq` GROUP BY `Model`")
sfile=cur.fetchall()
global qst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
qst=qlst.values
con.close()
return render_template('qdashboard.html',qulist=qst)
@app.route('/qoutgraph1', methods = ['GET', 'POST'])
def qoutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputq` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
        b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('qdashboard.html',mon=value,qulist=qst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
@app.route("/yearlysimulation",methods = ['GET','POST'])
def yearlysimulation():
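    # Reads the Monte Carlo settings posted from the form (which drivers to
    # simulate among GDP, Pi exports, market share and advertisement, plus the
    # distribution and parameters for each), fits a linear regression of
    # TotalDemand on those drivers, runs sim() for the requested year range,
    # and stores predictions, simulated inputs and error measures in MySQL.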
if request.method == 'POST':
gdp=0
pi=0
ms=0
adv=0
gdp_dis=request.form.get('gdp_dis')
pi_dis=request.form.get('pi_dis')
ms_dis=request.form.get('ms_dis')
adv_dis=request.form.get('adv_dis')
min=request.form.get('min')
max=request.form.get('max')
mue=request.form.get('mue')
sig=request.form.get('sig')
cval=request.form.get('cval')
min1=request.form.get('min1')
max1=request.form.get('max1')
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
cval1=request.form.get('cval1')
min2=request.form.get('min2')
max2=request.form.get('max2')
mue2=request.form.get('mue2')
sig2=request.form.get('sig2')
cval2=request.form.get('cval2')
min3=request.form.get('min3')
max3=request.form.get('max3')
mue3=request.form.get('mue3')
sig3=request.form.get('sig3')
cval3=request.form.get('cval3')
itr= int(request.form.get('itr'))
frm = request.form.get('from')
sfrm=int(frm[:4])
to = request.form.get('to')
sto=int(to[:4])
kwargs={}
atrtable=[]
if request.form.get('gdp'):
gdp=1
atrtable.append('Gdp')
if gdp_dis == 'gdp_dis1':
min=request.form.get('min')
max=request.form.get('max')
kwargs['Gdp_dis']='Uniform'
kwargs['gdpvalues']=[min,max]
if gdp_dis == 'gdp_dis2':
mue=request.form.get('mue')
sig=request.form.get('sig')
kwargs['Gdp_dis']='Normal'
kwargs['gdpvalues']=[mue,sig]
if gdp_dis == 'gdp_dis3':
kwargs['Gdp_dis']='Random'
pass
if gdp_dis == 'gdp_dis4':
cval=request.form.get('cval')
kwargs['Gdp_dis']='Constant'
kwargs['gdpvalues']=[cval]
if request.form.get('pi'):
pi=1
atrtable.append('Pi')
if pi_dis == 'pi_dis1':
min1=request.form.get('min1')
max1=request.form.get('max1')
kwargs['Pi_dis']='Uniform'
kwargs['pivalues']=[min1,max1]
if pi_dis == 'pi_dis2':
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
kwargs['Pi_dis']='Normal'
kwargs['pivalues']=[mue1,sig1]
if pi_dis == 'pi_dis3':
kwargs['Pi_dis']='Random'
pass
if pi_dis == 'pi_dis4':
cval1=request.form.get('cval1')
kwargs['Pi_dis']='Constant'
kwargs['pivalues']=[cval1]
if request.form.get('ms'):
ms=1
atrtable.append('Ms')
if ms_dis == 'ms_dis1':
min=request.form.get('min2')
max=request.form.get('max2')
kwargs['Ms_dis']='Uniform'
kwargs['msvalues']=[min2,max2]
if ms_dis == 'ms_dis2':
mue=request.form.get('mue2')
sig=request.form.get('sig2')
kwargs['Ms_dis']='Normal'
kwargs['msvalues']=[mue2,sig2]
if ms_dis == 'ms_dis3':
kwargs['Ms_dis']='Random'
pass
if ms_dis == 'ms_dis4':
cval=request.form.get('cval2')
kwargs['Ms_dis']='Constant'
kwargs['msvalues']=[cval2]
if request.form.get('adv'):
adv=1
atrtable.append('Adv')
if adv_dis == 'adv_dis1':
min=request.form.get('min3')
max=request.form.get('max3')
kwargs['Adv_dis']='Uniform'
kwargs['advvalues']=[min3,max3]
if adv_dis == 'adv_dis2':
mue=request.form.get('mue3')
sig=request.form.get('sig3')
kwargs['Adv_dis']='Normal'
kwargs['advvalues']=[mue3,sig3]
if adv_dis == 'adv_dis3':
kwargs['Adv_dis']='Random'
pass
if adv_dis == 'adv_dis4':
cval=request.form.get('cval3')
kwargs['Adv_dis']='Constant'
kwargs['advvalues']=[cval3]
#print(kwargs)
#print(atrtable)
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `stech` (`gdp` VARCHAR(1),`pi` VARCHAR(1), `ms` VARCHAR(1),`adv` VARCHAR(1),`itr` VARCHAR(5),`sfrm` VARCHAR(10),`sto` VARCHAR(10))")
cur.execute("DELETE FROM `stech`")
con.commit()
cur.execute("INSERT INTO `stech` VALUES('"+str(gdp)+"','"+str(pi)+"','"+str(ms)+"','"+str(adv)+"','"+str(itr)+"','"+str(sfrm)+"','"+str(sto)+"')")
con.commit()
data = pd.DataFrame(Yeardata)
#print(data)
data.columns
xvar=pd.concat([data['GDP'],data['Pi_Exports'],data['Market_Share'],data['Advertisement_Expense']],axis=1)
yvar=pd.DataFrame(data['TotalDemand'])
regr = linear_model.LinearRegression()
regr.fit(xvar,yvar)
# predict=regr.predict(xvar)
#Error Measures
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
def sim(iteration,data,startyear,endyear,atrtable,Gdp_dis=None,gdpvalues=None,Adv_dis=None,advvalues=None,Ms_dis=None,msvalues=None,Pi_dis=None,pivalues=None):
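            # One Monte Carlo pass per iteration: draw each selected driver from its
            # chosen distribution (Uniform / Normal / Constant, with a random fallback),
            # predict demand with the fitted regression, and score the prediction
            # against the actual demand of the overlapping years (ME / MAE / MAPE).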
preddata=pd.DataFrame()
simdata=pd.DataFrame()
#Errordf=pd.DataFrame()
Errormsr=pd.DataFrame()
date=pd.date_range(start=pd.datetime(startyear, 1, 1), end=pd.datetime(endyear+1, 1, 1),freq='A')
date=pd.DataFrame(date.strftime("%Y"))
            #Fetch the original demand values for the years that actually exist in the input data
m=len(date)
Arrayofdates=data['Date']
vari=[]
for var in Arrayofdates:
vari.append(var[:4])
Arrayofdates=pd.DataFrame(vari)
dates=[]
Fetchdata=[]
for i in range(0,m):
years=date.loc[i]
for j in range(0,len(Arrayofdates)):
if int(Arrayofdates.loc[j])==int(years):
da=data['TotalDemand'].loc[j]
Fetchdata.append(da) #Gives Data In the Given Range That we have actually
dates.extend(years) #Gives Years that we have data
for i in range(0,iteration):
df=pd.DataFrame()
#for The Gdp
S='flag'
for row in atrtable:
if row=='Gdp':
S='Gdp'
                if S=='Gdp':
                    # Draw GDP for all m periods from the user-selected distribution
                    if Gdp_dis=='Normal':
                        gdpdf=pd.DataFrame(np.random.normal(float(gdpvalues[0]),float(gdpvalues[1]),m))
                    elif Gdp_dis=='Uniform':
                        gdpdf=pd.DataFrame(np.random.uniform(float(gdpvalues[0]),float(gdpvalues[1]),m))
                    elif Gdp_dis=='Constant':
                        gdpdf=pd.DataFrame(np.random.choice([float(gdpvalues[0])],m))
                    else:
                        gdpdf=pd.DataFrame(np.random.uniform(-4,4,m))
                else:
                    gdpdf=pd.DataFrame(np.random.uniform(0,0,m))
# for the pi dataframe
O='flag'
for row in atrtable:
if row=='Pi':
O='Pi'
                if O=='Pi':
                    # Draw Pi exports from the user-selected distribution
                    if Pi_dis=='Normal':
                        pidf=pd.DataFrame(np.random.normal(float(pivalues[0]),float(pivalues[1]),m))
                    elif Pi_dis=='Uniform':
                        pidf=pd.DataFrame(np.random.uniform(float(pivalues[0]),float(pivalues[1]),m))
                    elif Pi_dis=='Constant':
                        pidf=pd.DataFrame(np.random.choice([float(pivalues[0])],m))
                    else:
                        pidf=pd.DataFrame(np.random.random_integers(80,120,m))
                else:
                    pidf=pd.DataFrame(np.random.uniform(0,0,m))
#for the Adv Dataframe
N='flag'
for row in atrtable:
if row=='Adv':
N='Adv'
                if N=='Adv':
                    # Draw advertisement expense from the user-selected distribution
                    if Adv_dis=='Normal':
                        advdf=pd.DataFrame(np.random.normal(float(advvalues[0]),float(advvalues[1]),m))
                    elif Adv_dis=='Uniform':
                        advdf=pd.DataFrame(np.random.uniform(float(advvalues[0]),float(advvalues[1]),m))
                    elif Adv_dis=='Constant':
                        advdf=pd.DataFrame(np.random.choice([float(advvalues[0])],m))
                    else:
                        advdf=pd.DataFrame(np.random.random_integers(500000,1000000,m))
                else:
                    advdf=pd.DataFrame(np.random.uniform(0,0,m))
#for the Ms dataframe
U='flag'
for row in atrtable:
if row=='Ms':
U='Ms'
                if U=='Ms':
                    # Draw market share from the user-selected distribution
                    if Ms_dis=='Normal':
                        msdf=pd.DataFrame(np.random.normal(float(msvalues[0]),float(msvalues[1]),m))
                    elif Ms_dis=='Uniform':
                        msdf=pd.DataFrame(np.random.uniform(float(msvalues[0]),float(msvalues[1]),m))
                    elif Ms_dis=='Constant':
                        msdf=pd.DataFrame(np.random.choice([float(msvalues[0])],m))
                    else:
                        msdf=pd.DataFrame(np.random.uniform(0.1,0.5,m))
                else:
                    msdf=pd.DataFrame(np.random.uniform(0,0,m))
#Concatenating All the dataframes for Simulation Data
df=pd.concat([gdpdf,pidf,msdf,advdf],axis=1)
simid=pd.DataFrame(np.random.choice([i+1],m))
dd=pd.concat([simid,gdpdf,pidf,advdf,msdf],axis=1)
dd.columns=['Year','Gdp','Pi','Adv','Ms']
simdata=pd.concat([simdata,dd],axis=0)
                #Predict demand for the simulated drivers with the fitted regression and store it in preddata
dfs=pd.DataFrame(regr.predict(df))
datatable=pd.concat([simid,date,dfs],axis=1)
datatable.columns=['simid','Year','Total_Demand(Tonnes)']
preddata=pd.concat([datatable,preddata],axis=0)
datas=list()
                #Collect the predicted values for the dates that have actual data
# print(datatable)
for row in dates:
# print(dates)
datas.extend(datatable.loc[datatable['Year'] ==row, 'Total_Demand(Tonnes)'])
kkk=pd.DataFrame(datas)
me=ME(Fetchdata,kkk)
mae=MAE(Fetchdata,kkk)
mape=MAPE(Fetchdata,kkk)
dfe=pd.DataFrame([me,mae,mape],index=['ME','MAE','MAPE']).T
Errormsr=pd.concat([Errormsr,dfe],axis=0).reset_index(drop=True)
return preddata,simdata,Errormsr
preddata,simdata,Errormsr=sim(itr,data,sfrm,sto,atrtable,**kwargs)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
preddata.to_sql(con=engine, name='predicteddata',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
simdata.to_sql(con=engine2, name='simulationdata',index=False, if_exists='replace')
con.commit()
engine3 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Errormsr.to_sql(con=engine3, name='simerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `simerror`")
sdata = cnr.fetchall()
simerror = pd.DataFrame(sdata)
con.close()
return render_template('ysimulation.html',sayy=1,simerror=simerror.to_html(index=False))
return render_template('ysimulation.html')
##PROCURMENT PLANNING
@app.route('/procurementplanning')
def procurementplanning():
return render_template('vendorselection_criterianumberask.html')
@app.route("/criteriagenerate", methods=['GET','POST'])
def criteriagenerate():
if request.method == 'POST':
global cnmbr
global vnmbr
cnmbr = int(request.form['cnmbr'])
vnmbr = int(request.form['vnmbr'])
if cnmbr == 0 or vnmbr==0:
return render_template('criterianumberask.html',warning='Data Invalid')
cmainlist=[]
global cnames
cnames = []
for i in range (1,cnmbr+1):
lst=[]
name='cname'+str(i)
lst.append(i)
lst.append(name)
cmainlist.append(lst)
cnames.append(name)
vmainlist=[]
global vnames
vnames = []
for i in range (1,vnmbr+1):
lst=[]
name='vname'+str(i)
lst.append(i)
lst.append(name)
vmainlist.append(lst)
vnames.append(name)
return render_template('vendorselection_criteriagenerate.html',cmainlist=cmainlist,vmainlist=vmainlist)
return render_template('vendorselection_criterianumberask.html')
@app.route("/criteriagenerated", methods=['GET','POST'])
def criteriagenerated():
if request.method == 'POST':
global criterianames
criterianames=[]
for name in cnames:
criterianame = request.form[name]
criterianames.append(criterianame)
global vendornames
vendornames=[]
for name in vnames:
vendorname = request.form[name]
vendornames.append(vendorname)
mcrlst=[]
cn=len(criterianames)
k=1
global maincriteriaoption
maincriteriaoption=[]
global maincritriacri
maincritriacri=[]
for i in range(cn-1):
for j in range (i+1,cn):
cri='criteriaorder'+str(k)
opt='coption'+str(k)
crlst=[k,cri,criterianames[i],criterianames[j],opt]
mcrlst.append(crlst)
k=k+1
maincriteriaoption.append(opt)
maincritriacri.append(cri)
mvrlst=[]
vn=len(vendornames)
k=1
global mainvendoroption
mainvendoroption=[]
global mainvendorcri
mainvendorcri=[]
for z in criterianames:
mvrlst1=[]
vcri=[]
vopt=[]
for i in range(vn-1):
for j in range (i+1,vn):
cri='vendororder'+z+str(k)
opt='voption'+z+str(k)
vrlst=[k,cri,vendornames[i],vendornames[j],opt]
mvrlst1.append(vrlst)
k=k+1
vcri.append(cri)
vopt.append(opt)
mvrlst.append(mvrlst1)
mainvendorcri.append(vcri)
mainvendoroption.append(vopt)
return render_template('vendorselection_maincriteria.html',mcrlst=mcrlst,mvrlst=mvrlst)
return render_template('vendorselection_criteriagenerated.html')
def tablecreator(imp,val,crit):
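    # Builds the AHP pairwise-comparison matrix: entries marked as "less important"
    # are inverted, the diagonal is set to 1, and each entered value is mirrored
    # across the diagonal as its reciprocal.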
n=len(imp)
for i in range(n):
if imp[i]==1:
val[i]=float(1/val[i])
fdata=pd.DataFrame(columns=[crit],index=[crit])
i=0
k=0
for index in fdata.index:
j=0
for columns in fdata.columns:
if i==j:
fdata[index][columns]=1
if i<j:
fdata[index][columns]=round((float(val[k])),2)
fdata[columns][index]=round((1/val[k]),2)
k=k+1
j=j+1
i=i+1
return fdata
@app.route("/criteriaread", methods=['GET','POST'])
def criteriaread():
if request.method == 'POST':
importances = []
values = []
for name1 in maincritriacri:
imp = int(request.form[name1])
importances.append(imp)
for name2 in maincriteriaoption:
val = int(request.form[name2])
values.append(val)
#global maincriteriadata
maincriteriadata=tablecreator(importances,values,criterianames)
mainimportances=[]
for crioption in mainvendorcri:
importance=[]
for option1 in crioption:
impc = int(request.form[option1])
importance.append(impc)
mainimportances.append(importance)
mainvalues=[]
for vendoroption in mainvendoroption:
vvalues=[]
for option2 in vendoroption:
valuev = int(request.form[option2])
vvalues.append(valuev)
mainvalues.append(vvalues)
maindf=[]
for z in range(len(criterianames)):
df=tablecreator(mainimportances[z],mainvalues[z],vendornames)
maindf.append(df)
dictmain={'crit':maincriteriadata}
names=criterianames
dfs=maindf
dictionary=dict((n,d) for (n,d) in zip(names,dfs))
def ahpmain(dictmain):
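            # Approximates the AHP priority vector: square the comparison matrix and
            # normalise its row sums (one step of the usual eigenvector approximation
            # for pairwise-comparison matrices).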
global wt_Crit
wt_Crit=[]
key=[]
key=list(dictmain.keys())
for i in key:
Crit=np.dot(dictmain[i],dictmain[i])
row_sum=[]
for j in range(len(Crit)):
row_sum.append(sum(Crit[j]))
wt_Crit.append([s/sum(row_sum) for s in row_sum])
Crit=[]
return wt_Crit
def ahp(dictmain,dictionary):
global output
main= ahpmain(dictmain)
submain= ahpmain(dictionary)
dd=pd.DataFrame(submain).T
df=pd.DataFrame(main).T
output=np.dot(dd,df)
return output,dd
yaxis,dd=ahp(dictmain,dictionary)
yax=pd.DataFrame(yaxis,index=vendornames,columns=['Score']).sort_values('Score',ascending=False).T
ynames=yax.columns
yval=yax.T.values
dd.index=vendornames
dd.columns=names
dd=dd.T
opq23=[]
for column in dd.columns:
opq21=[]
opq22=[]
opq21.append(column)
for val in dd[column]:
opq22.append(val)
opq21.append(opq22)
opq23.append(opq21)
return render_template('vendorselection_ahp_final_output.html',ynames=ynames,yval=yval,dd=opq23,names=names)
return render_template('vendorselection_criteriagenerated.html')
#DETERMINISTIC STARTS
@app.route("/spt")
def spt():
return render_template('SinglePeriod.html')
@app.route("/ppbreak")
def ppbreak():
return render_template('pbreak.html')
@app.route('/pbrk', methods=['GET','POST'])
def pbrk():
return render_template('pbrk.html')
@app.route('/eoq', methods=['GET','POST'])
def eoq():
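    # Classic EOQ model: EOQ = sqrt(2*D*K / h) with D = annual demand,
    # K = fixed cost per order and h = AnnHoldingcost * UnitCost; the reorder
    # point is daily demand * lead time + safety stock, and the qty/cost lists
    # below trace the total-cost curve around the EOQ for plotting.
    # Rough sanity check with the defaults (D=100, K=500, h=0.25*445=111.25):
    # EOQ = sqrt(2*100*500/111.25), which is about 30 units.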
    ## Default EOQ inputs; each is overridden by the posted form values below
    AnnulaUnitsDemand=100 ## annual demand in units
    FixedCost=500 ## fixed ordering (setup) cost per order
    AnnHoldingcost=0.25 ## annual holding-cost rate, as a fraction of unit cost
    UnitCost=445 ## purchase cost per unit
    LeadTime=10 ## days between placing and receiving an order
    SafetyStock=100 ## extra stock held against demand variability
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=float(AnnulaUnitsDemand)
FixedCost=float(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=float(UnitCost)
LeadTime=float(LeadTime)
SafetyStock=float(SafetyStock)
sgap=1
pgap=1
HoldingCost=AnnHoldingcost*UnitCost
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))*sgap),2)
REOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))*sgap),0)
totOrderCost=round((FixedCost*AnnulaUnitsDemand/EOQ),2)
totHoldCost=round(((HoldingCost*EOQ*pgap)/2),2)
TotalCost=round((totOrderCost+totHoldCost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count=round((EOQ*.75),0)
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(count)
hclist.append(round((count/2*HoldingCost),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round((count/2*HoldingCost+AnnulaUnitsDemand/count*FixedCost),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
tclist.append(totHoldCost+totOrderCost)
while (count < (EOQ*2)):
qtylist1.append(count)
hclist.append(round((count/2*HoldingCost),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round((count/2*HoldingCost+AnnulaUnitsDemand/count*FixedCost),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
# sstock=int(math.sqrt((LeadTime^2)+(int(ReorderPoint)^2)))
return render_template('eoq.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,LeadTime=LeadTime,
SafetyStock=SafetyStock)
########################EEEEppppppppppQQQQQQ############
########################EEEEppppppppppQQQQQQ############
@app.route('/eproduction', methods=['GET','POST'])
def eproduction():
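    # Economic Production Quantity (EPQ): same as EOQ except the holding cost is
    # scaled by pgap = 1 - D/P (P = production rate), i.e.
    # EPQ = sqrt(2*D*K / (h * (1 - D/P))).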
AnnulaUnitsDemand=100
Prodrate=125
FixedCost=500
AnnHoldingcost=0.1
UnitCost=25000
LeadTime=10
SafetyStock=100
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
Prodrate=request.form['Prodrate']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=int(AnnulaUnitsDemand)
Prodrate=int(Prodrate)
FixedCost=int(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=int(UnitCost)
LeadTime=int(LeadTime)
SafetyStock=int(SafetyStock)
if(Prodrate<=AnnulaUnitsDemand):
            return render_template('eproduction.html',warning='Production rate should not be less than the annual demand',
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,Prodrate=Prodrate,
LeadTime=LeadTime,SafetyStock=SafetyStock
)
pgap=round((1-(AnnulaUnitsDemand/Prodrate)),2)
HoldingCost=float(AnnHoldingcost*UnitCost)
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))),2)
REOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))),0)
totOrderCost=round((FixedCost*AnnulaUnitsDemand/EOQ),2)
totHoldCost=round(((HoldingCost*EOQ*pgap)/2),2)
TotalCost=round((totOrderCost+totHoldCost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count=EOQ*.75
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(int(count))
hclist.append(round((count/2*HoldingCost*pgap),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round(((count/2*HoldingCost*pgap+AnnulaUnitsDemand/count*FixedCost)),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
tclist.append(totOrderCost+totHoldCost)
while (count < (EOQ*1.7)):
qtylist1.append(int(count))
hclist.append(round((count/2*HoldingCost*pgap),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round(((count/2*HoldingCost*pgap+AnnulaUnitsDemand/count*FixedCost)),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
return render_template('eproduction.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,Prodrate=Prodrate,
LeadTime=LeadTime,SafetyStock=SafetyStock
)
######################EEEEppppppppppQQQQQQ############
######################EEEEppppppppppQQQQQQ############
@app.route('/eoq_backorders', methods=['GET','POST'])
def eoq_backorders():
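    # EOQ with planned backorders: Q* = sqrt(2*D*K/h) * sqrt((p+h)/p) where
    # p is the shortage cost; the maximum backorder level is Q* * h/(p+h), and
    # the total cost splits into ordering, holding and shortage components.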
AnnulaUnitsDemand=12000
shortcost=1.1
FixedCost=8000
AnnHoldingcost=0.3
UnitCost=1
LeadTime=10
SafetyStock=100
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
shortcost=request.form['shortcost']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=int(AnnulaUnitsDemand)
shortcost=int(shortcost)
FixedCost=int(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=int(UnitCost)
LeadTime=int(LeadTime)
SafetyStock=int(SafetyStock)
HoldingCost=float(AnnHoldingcost*UnitCost)
sgap=(shortcost+HoldingCost)/shortcost
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/HoldingCost))*(math.sqrt(sgap)),2)
REOQ=round(math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost)*sgap),0)
totbackorder=EOQ*(HoldingCost/(shortcost+HoldingCost))
totOrderCost=round(((FixedCost*AnnulaUnitsDemand)/EOQ),2)
totHoldCost=round(((HoldingCost*((EOQ-totbackorder)**2))/(2*EOQ)),2)
totshortcost=round((shortcost*(totbackorder**2)/(2*EOQ)),2)
TotalCost=round((totOrderCost+totHoldCost+totshortcost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count= EOQ*.75
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
shlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(int((count)))
hclist.append(round(((HoldingCost*((count-totbackorder)**2))/(2*count)),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
shlist.append(round((shortcost*((totbackorder)**2)/(2*count)),2))
tclist.append(round(((((HoldingCost*((count-totbackorder)**2))/(2*count))+AnnulaUnitsDemand/count*FixedCost)+shortcost*((totbackorder)**2)/(2*count)),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
shlist.append(totshortcost)
tclist.append(totOrderCost+totshortcost+totHoldCost)
while (count < (EOQ*1.7)):
qtylist1.append(int((count)))
hclist.append(round(((HoldingCost*((count-totbackorder)**2))/(2*count)),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
shlist.append(round((shortcost*((totbackorder)**2)/(2*count)),2))
tclist.append(round(((((HoldingCost*((count-totbackorder)**2))/(2*count))+AnnulaUnitsDemand/count*FixedCost)+shortcost*((totbackorder)**2)/(2*count)),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
return render_template('eoq_backorders.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
shlist=shlist,sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,shortcost=shortcost,
LeadTime=LeadTime,SafetyStock=SafetyStock)
#################pbreak######################
@app.route("/pbreak_insert", methods=['GET','POST'])
def pbreak_insert():
if request.method == 'POST':
quantity = request.form.getlist("quantity[]")
price = request.form.getlist("price[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("CREATE TABLE IF NOT EXISTS `pbreaktable` (quantity int(8),price int(8))")
curr.execute("DELETE FROM `pbreaktable`")
conn.commit()
say=1
for i in range(len(quantity)):
quantity_clean = quantity[i]
price_clean = price[i]
if quantity_clean and price_clean:
curr.execute("INSERT INTO `pbreaktable`(`quantity`,`price`) VALUES('"+quantity_clean+"','"+price_clean+"')")
conn.commit()
else:
say=0
if say==0:
message="Some values were not inserted!"
else:
message="All values were inserted!"
return(message)
@app.route('/view', methods=['GET','POST'])
def view():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("SELECT * FROM `pbreaktable`")
res = curr.fetchall()
ress=pd.DataFrame(res)
return render_template('pbrk.html',username=username,ress =ress.to_html())
@app.route('/pbreakcalculate', methods=['GET','POST'])
def pbreakcalculate():
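    # Price-break EOQ (this looks like the all-units discount variant): compute
    # the EOQ at every quoted price, keep only the EOQs that fall inside their
    # own quantity bracket, and pick the feasible option with the lowest total cost.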
AnnulaUnitsDemand=10
FixedCost=1
AnnHoldingcost=0.1
UnitCost=445
LeadTime=10
SafetyStock=100
if request.method == 'POST':
if request.form['AnnulaUnitsDemand']:
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
AnnulaUnitsDemand=float(AnnulaUnitsDemand)
if request.form['FixedCost']:
FixedCost=request.form['FixedCost']
FixedCost=float(FixedCost)
if request.form['AnnHoldingcost']:
AnnHoldingcost=request.form['AnnHoldingcost']
AnnHoldingcost=float(AnnHoldingcost)
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("SELECT * FROM `pbreaktable`")
res = curr.fetchall()
ress=pd.DataFrame(res)
conn.close()
datatable=pd.DataFrame(columns=['Quantity','Price','EOQ','TotalCost'])
mainlist=[]
Qu=ress['quantity']
Qm=0
for index, i in ress.iterrows():
tcl=[]
quantity = i['quantity']
price = i['price']
HoldingCost1=AnnHoldingcost*price
eoq1=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost1))),2)
REOQ=round(eoq1,0)
totOrderCost1=round((FixedCost*AnnulaUnitsDemand/eoq1),2)
totHoldCost1=round(((HoldingCost1*eoq1)/2),2)
totalcost1=float(round((totOrderCost1+totHoldCost1),2))
lst=[quantity,price,eoq1,totalcost1]
a=pd.DataFrame(lst).T
a.columns=['Quantity','Price','EOQ','TotalCost']
datatable=pd.concat([datatable,a],ignore_index=True)
name='TotalCost (Price='+str(a['Price'][0])+')'
tcl.append(name)
Qmin=1
Qmax=Qu[Qm]
qtylist2=[]
tclist1=[]
while (Qmin < Qmax):
qtylist2.append(Qmin)
tclist1.append(round((Qmin/2*totHoldCost1+AnnulaUnitsDemand/Qmin*FixedCost),2))
Qmin +=2
Qmin=Qmax+1
qtylist2.append(eoq1)
tclist1.append(totalcost1)
tcl.append(tclist1)
mainlist.append(tcl)
Eu=datatable['EOQ']
Qu=datatable['Quantity']
Tu=datatable['TotalCost']
minlst=[]
for i in range(len(Eu)):
if i ==0:
if Eu[i]<=Qu[i]:
minlst.append(i)
else:
if Eu[i]<=Qu[i] and Eu[i]>Qu[i-1]:
minlst.append(i)
if len(minlst)==0:
minnval='Solution not feasible'
else:
minval=Tu[minlst[0]]
minnval=Eu[minlst[0]]
for j in minlst:
if Tu[j]<minval:
minval=Tu[j]
minnval=Eu[j]
val1=0
for i in range(len(tclist1)):
if (round(minnval))==qtylist2[i]:
val1=i
minival=round(minval)
minnival=round(minnval)
NumOrders=round((AnnulaUnitsDemand/minnval),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
return render_template('pbreak.html',
NumOrders=NumOrders,OrderTime=OrderTime,REOQ=REOQ,ReorderPoint=ReorderPoint,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,LeadTime=LeadTime,
SafetyStock=SafetyStock,minnval=minnval,minval=minval,minival=minival,minnival=minnival,
datatable=datatable.to_html(index=False),mainlist=mainlist,
val1=val1,tclist1=tclist1,qtylist2=qtylist2)
#################Demand probabilistic######################
@app.route('/demand', methods=['GET', 'POST'])
def demand():
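    # Newsvendor with a discrete empirical demand distribution (Demand.csv):
    # critical ratio CSl = Cu/(Cu+Co) with Cu = price-cost and Co = cost-salvage;
    # the suggested order quantity is the first demand level whose cumulative
    # probability exceeds CSl.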
cost=10
price=12
salvage=2
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
cost=int(cost)
price=int(price)
salvage=int(salvage)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
cdf=[]
sum=0
for row in data['Prob']:
sum=sum+row
cdf.append(sum)
cumm_freq=(pd.DataFrame(cdf)).values##y-axis
overcost=cost-salvage
undercost=price-cost
CSl=undercost/(undercost+overcost)
k=[row>CSl for row in cumm_freq]
count=1
for row in k:
if row==False:
count=count+1
demand=(data['Demand']).values
w=data['Demand'].loc[count]##line across x-axis
val=0
for i in range(len(cumm_freq)):
if(w==demand[i]):
val=i
return render_template('demand.html',cost=cost,price=price,salvage=salvage,
cumm_freq=cumm_freq,demand=demand,val=val)
@app.route('/normal', methods=['GET', 'POST'])
def normal():
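    # Newsvendor with normally distributed demand: z = Phi^-1(CSl); the route
    # plots the standard normal pdf and marks z (the optimal quantity would be
    # mean + z*sd, as in the commented-out Expecteddemand line below).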
cost=10
price=12
salvage=9
sd=2
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
cost=int(cost)
price=int(price)
salvage=int(salvage)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
overcost1=cost-salvage
undercost1=price-cost
CSl=undercost1/(undercost1+overcost1)
zz=st.norm.ppf(CSl)##x-line
z=float(format(zz, '.2f'))
# Expecteddemand=round(mea+(z*sd))
mean = 0; sd = 1; variance = np.square(sd)
x = np.arange(-4,4,.01)##x-axis
    f =(np.exp(-np.square(x-mean)/(2*variance))/(np.sqrt(2*np.pi*variance)))##y-axis: standard normal pdf
val=0
for i in range(len(f)):
if(z==round((x[i]),2)):
val=i
return render_template('normal.html',x=x,f=f,val=val,cost=cost,price=price,salvage=salvage)
@app.route('/utype', methods=['GET','POST'])
def utype():
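    # Newsvendor with Uniform(mini, maxi) demand: the optimal order quantity is
    # mini + (maxi - mini) * CSl, which is what expdemand1 computes below.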
cost=10
price=12
salvage=2
mini=1
maxi=10
if request.method == 'POST':
cost=request.form['cost']
price=request.form['price']
salvage=request.form['salvage']
mini=request.form['mini']
maxi=request.form['maxi']
cost=int(cost)
price=int(price)
salvage=int(salvage)
mini=int(mini)
maxi=int(maxi)
data=pd.read_csv(localpath+"\\Demand.csv")
data = pd.DataFrame(data)
overcost=cost-salvage
undercost=price-cost
CSl=undercost/(undercost+overcost)
expdemand1=round(mini+((maxi-mini)*CSl))
# a=[mini,0]
# b=[mini,100]
# c=[maxi,0]
# d=[maxi,100]
# width = c[0] - b[0]
# height = d[1] - a[1]
lims = np.arange(0,maxi,1)
val=0
for i in range(len(lims)):
if(expdemand1==lims[i]):
val=i
return render_template('utype.html',x=lims,f=lims,val=val,cost=cost,price=price,salvage=salvage,mini=mini,maxi=maxi)
@app.route('/outputx', methods=['GET', 'POST'])
def outputx():
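    # ABC classification of the `abc` table: items are sorted by cost share and
    # assigned class A/B/C using cumulative cost cut-offs (80% / 95%) together
    # with cumulative usage cut-offs (10% / 25%); the rest of the route
    # aggregates cost, usage and value per class for the charts.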
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM `abc`")
all_data = cur.fetchall()
all_data = pd.DataFrame(all_data)
A_ccat=.8
B_ccat=.95
A_ucat=.1
B_ucat=.25
tot_cost=all_data['Cost'].sum()
tot_usage=all_data['Annual Usage'].sum()
all_data['perc_cost']=all_data['Cost']/tot_cost
all_data['perc_usage']=all_data['Annual Usage']/tot_usage
all_data.sort_values(by=['perc_cost'], inplace=True, ascending=False)
sort_data=all_data.reset_index()
sort_data['cum_cperc']=np.nan
sort_data['cum_uperc']=np.nan
sort_data['Class']=''
for i in range(len(sort_data)):
if(i==0):
sort_data.set_value(i, 'cum_cperc', sort_data['perc_cost'][i])
sort_data.set_value(i, 'cum_uperc', sort_data['perc_usage'][i])
# cperc_data.append(all_data['perc_cost'][i])
sort_data.set_value(i,'Class','A')
else:
sort_data.set_value(i, 'cum_cperc', sort_data['perc_cost'][i]+sort_data['cum_cperc'][i-1])
sort_data.set_value(i, 'cum_uperc', sort_data['perc_usage'][i]+sort_data['cum_uperc'][i-1])
if(sort_data['cum_cperc'][i]<=A_ccat and sort_data['cum_uperc'][i]<=A_ucat):
sort_data.set_value(i,'Class','A')
elif(sort_data['cum_cperc'][i]<=B_ccat and sort_data['cum_uperc'][i]<=B_ucat):
sort_data.set_value(i,'Class','B')
else:
sort_data.set_value(i,'Class','C')
x7=sort_data[['cum_cperc']]
x1=x7*100
x3=np.round(x1)
x2=np.array([])
x5 = np.append(x2,x3)
y7= sort_data[['cum_uperc']]
y1=y7*100
y3=np.round(y1)
y2=np.array([])
y5 = np.append(y2,y3)
###############% of Total cost//
a= sort_data[(sort_data['Class']=='A')][['perc_cost']]
j=a.sum()
k=j*100
pd.DataFrame(k)
kf=k[0]
b= sort_data[(sort_data['Class']=='B')][['perc_cost']]
n=b.sum()
m=n*100
pd.DataFrame(m)
mf=m[0]
c= sort_data[(sort_data['Class']=='C')][['perc_cost']]
o=c.sum()
p=o*100
pd.DataFrame(p)
pf=p[0]
tes=k,m,p
t2 = np.array([])
te2 = np.append(t2,tes)
###################Items // Annual Usage
# z=sort_data[['Product number']]
# z1=z.sum()
f= sort_data[(sort_data['Class']=='A')][['Product number']]
v=f.sum()
pd.DataFrame(v)
vif=v[0]
f1= sort_data[(sort_data['Class']=='B')][['Product number']]
u=f1.sum()
pd.DataFrame(u)
uif=u[0]
f2= sort_data[(sort_data['Class']=='C')][['Product number']]
vf=f2.sum()
pd.DataFrame(vf)
kif=vf[0]
#################% of Total units // Annual Usage
t= sort_data[(sort_data['Class']=='A')][['perc_usage']]
i=t.sum()
p1=i*100
pd.DataFrame(p1)
nf=p1[0]
l= sort_data[(sort_data['Class']=='B')][['perc_usage']]
t=l.sum()
q1=t*100
pd.DataFrame(q1)
qf=q1[0]
u= sort_data[(sort_data['Class']=='C')][['perc_usage']]
w=u.sum()
s1=w*100
pd.DataFrame(s1)
sf=s1[0]
test=p1,q1,s1
tt2 = np.array([])
tte2 = np.append(tt2,test)
#############values//Cost*Annual Usage
sort_data['Value'] = sort_data['Cost'] * sort_data['Annual Usage']
fz= sort_data[(sort_data['Class']=='A')][['Value']]
vz=fz.sum()
pd.DataFrame(vz)
vzz=vz[0]
fz1= sort_data[(sort_data['Class']=='B')][['Value']]
uz=fz1.sum()
pd.DataFrame(uz)
uzf=uz[0]
fz2= sort_data[(sort_data['Class']=='C')][['Value']]
vzf=fz2.sum()
pd.DataFrame(vzf)
kzf=vzf[0]
h=[{'Scenario':'A','Values':vzz,'product number':vif,'perc_usage':nf,'perc_cost ':kf},
{'Scenario':'B','Values':uzf,'product number':uif,'perc_usage':qf,'perc_cost ':mf},
{'Scenario':'C','Values':kzf,'product number':kif,'perc_usage':sf,'perc_cost ':pf}]
df = | pd.DataFrame(h) | pandas.DataFrame |
import requests
from collections import defaultdict
import folium
import json
import pandas as pd
import os
#import plotly.express as px
coord_dict ={
'Sikkim':[27.5330,88.5122],'Andhra Pradesh':[15.9129,79.7400], 'Bihar':[25.0961,85.313], 'Chhattisgarh':[21.2787,81.8661],'Arunachal Pradesh':[28.2180,94.7278],\
'Delhi':[28.7041,77.1025], 'Gujarat':[22.2587,71.1924], 'Haryana':[29.0588,76.0856], 'Himachal Pradesh':[31.1048,77.1734], 'Karnataka':[15.3173,75.7139], \
'Kerala':[10.8505,76.2711], 'Madhya Pradesh':[22.9734,78.6569], 'Maharashtra':[19.7515,75.7139], 'Manipur':[24.6637,93.9063], 'Mizoram':[23.1645,92.9376], \
'Odisha':[20.9517,85.0985], 'Puducherry':[11.9416,79.8083], 'Punjab':[31.1471,75.3412], 'Rajasthan':[27.0238,74.2179], 'Tamil Nadu':[11.1271,78.6569],\
'Chandigarh':[30.7333,76.7794], 'Jammu and Kashmir':[33.7782,76.5762], 'Uttar Pradesh':[26.8467,80.9462],\
'Uttarakhand':[30.0668,79.0193], 'West Bengal':[22.9868,87.8550],'Telengana':[18.1124,79.0193],'Tripura':[23.9408,91.9882],'Assam':[26.2006,92.9376],
'Goa':[15.2993,74.1240],'Nagaland':[26.1584,94.5624],'Lakshadweep':[8.295441,73.048973],'Dadra and Nagar Haveli':[20.1809,73.0169], 'Daman and Diu':[20.4283,72.839],
'Jharkhand':[23.6102,85.2799],'Meghalaya':[25.4670,91.3662],'Andaman and Nicobar Islands':[11.7401,92.6586],
'Ladakh':[34.152588,77.577049]
#print(f'Data Pull status={status}')
}
def get_state_data():
'''
Collect latest statewise data for corona virus cases and plot a choropleth
map with markers describing numbers
'''
response = requests.get('https://api.rootnet.in/covid19-in/stats/daily')
response_dict = response.json()
states = defaultdict(dict)
status = response_dict['success']
cases = []#defaultdict()
#print(f'Data Pull status={status}')
state_list=['Sikkim','Andhra Pradesh', 'Bihar', 'Chhattisgarh','Arunachal Pradesh',\
'Delhi', 'Gujarat', 'Haryana', 'Himachal Pradesh', 'Karnataka', \
'Kerala', 'Madhya Pradesh', 'Maharashtra', 'Manipur', 'Mizoram', \
'Odisha', 'Puducherry', 'Punjab', 'Rajasthan', 'Tamil Nadu',\
'Chandigarh', 'Jammu and Kashmir', 'Uttar Pradesh',\
'Uttarakhand', 'West Bengal','Telengana','Tripura','Assam',
'Goa','Nagaland','Lakshadweep','Dadra and Nagar Haveli', 'Daman and Diu',
'Jharkhand','Meghalaya','Andaman and Nicobar Islands']
try:
daily_data = response_dict['data']
# to get latest day data
latest_data = daily_data[-1]
latest_regional_data = latest_data['regional']
latest_date = latest_data['day']
latest_summary = latest_data['summary']
const=0
m = folium.Map([20.5937, 78.9629],zoom_start=4.5)
for item in latest_regional_data:
if item['loc']=='Ladakh':
# handling Ladakh seperately as Ladakh geojson boundaries not available.
const = item['confirmedCasesIndian']+item['confirmedCasesForeign']+item['discharged']+item['deaths']
folium.Marker(coord_dict[item['loc']],color='blue',popup=item,tooltip=item['loc'],max_height=5,icon=folium.Icon(color='yellow', icon='user')).add_to(m)
continue
else:
k = item['loc']
states[k] = item
total_cases = item['confirmedCasesIndian']+item['confirmedCasesForeign']+item['discharged']+item['deaths']
cases.append( {'State':k ,'Total_Cases': total_cases})
folium.Marker(coord_dict[item['loc']],color='blue',popup=item,tooltip=item['loc'],max_height=5,icon=folium.Icon(color='yellow', icon='user')).add_to(m)
state_geo_data=json.load(open(os.getcwd()+'/indian_states.json'))
# centered on coordinates of india
for st in state_list:
if st not in states.keys():
cases.append( {'State':st ,'Total_Cases': 0})
states[st] = {'confirmedCasesIndian': 0, 'confirmedCasesForeign': 0, 'discharged': 0, 'deaths': 0}
state_data = pd.DataFrame(cases,columns=['State','Total_Cases'])
state_data.sort_values(by=['State'],inplace=True)
state_data.loc[state_data['State']=='Jammu and Kashmir','Total_Cases']+=const
# print(state_data)
# print('########################################')
# print(states.keys())
# print(len(states.keys()))
# print(type(state_geo_data))
# for i,_ in enumerate(state_geo_data['features']):
# state_geo_data['features'][i]['id'] = state_geo_data['features'][i]['properties']['NAME_1']
# #state_data['State'][i]=state_geo_data['features'][i]['id']
# print(state_geo_data['features'][i]['id'])
#state_data
folium.Choropleth(
geo_data=state_geo_data,
name='choropleth',
data=state_data,
columns=['State', 'Total_Cases'],
key_on= 'feature.properties.NAME_1',#'features.id',#'features.properties.NAME_1',
fill_color='YlOrRd',
fill_opacity=0.7,
line_opacity=0.9,
legend_name='Total Covid 19 cases in India'
).add_to(m)
m.save('frontend/assets/Map.html')
latest_states_df = | pd.DataFrame(latest_regional_data) | pandas.DataFrame |
import numpy as np
import pandas as pd
| pd.set_option('display.expand_frame_repr', False) | pandas.set_option |
import os
import numpy as np
import pandas as pd
from pipedown.cross_validation.splitters import RandomSplitter
from pipedown.dag import DAG
from pipedown.nodes.base import Input, Model, Node, Primary
from pipedown.nodes.filters import Collate, ItemFilter
from pipedown.nodes.metrics import MeanSquaredError
def test_dag_fit_run_and_fitrun():
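    # Exercises DAG.fit, DAG.run and DAG.fit_run end to end: the fit/run order is
    # recorded in the module-level lists so the test can assert which nodes were
    # touched and how many times.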
run_list = []
fit_list = []
class MyLoader(Node):
def run(self, *args):
df = pd.DataFrame()
df["a"] = np.random.randn(10)
df["b"] = np.random.randn(10)
df["c"] = np.random.randn(10)
return df
class MyNode(Node):
def __init__(self, name):
self._name = name
def fit(self, X, y):
fit_list.append(self._name)
self.x_mean = X.mean()
def run(self, X, y):
run_list.append(self._name)
return X + self.x_mean, y
class MyDAG(DAG):
def nodes(self):
return {
"input": Input(),
"loader": MyLoader(),
"primary": Primary(["a", "b"], "c"),
"my_node1": MyNode("A"),
"my_node2": MyNode("B"),
}
def edges(self):
return {
"primary": {"test": "input", "train": "loader"},
"my_node1": "primary",
"my_node2": "my_node1",
}
# Fit
my_dag = MyDAG()
dag_outputs = my_dag.fit(outputs="my_node2")
assert dag_outputs is None
assert isinstance(fit_list, list)
assert isinstance(run_list, list)
assert len(fit_list) == 2
assert len(run_list) == 2
assert "A" in fit_list
assert "B" in fit_list
assert "A" in run_list
assert "B" in run_list
# Run
df = pd.DataFrame()
df["a"] = np.random.randn(5)
df["b"] = np.random.randn(5)
xo, yo = my_dag.run(inputs={"input": df}, outputs="my_node2")
assert isinstance(xo, pd.DataFrame)
assert xo.shape[0] == 5
assert yo is None
assert isinstance(fit_list, list)
assert isinstance(run_list, list)
assert len(fit_list) == 2
assert len(run_list) == 4
assert "A" in fit_list
assert "B" in fit_list
assert "A" in run_list[2:]
assert "B" in run_list[2:]
# Fit run
while len(fit_list) > 0:
fit_list.pop()
while len(run_list) > 0:
run_list.pop()
xo, yo = my_dag.fit_run(outputs="my_node2")
assert isinstance(xo, pd.DataFrame)
assert xo.shape[0] == 10
assert xo.shape[1] == 2
assert isinstance(yo, pd.Series)
assert yo.shape[0] == 10
assert isinstance(fit_list, list)
assert isinstance(run_list, list)
assert len(fit_list) == 2
assert len(run_list) == 2
assert "A" in fit_list
assert "B" in fit_list
assert "A" in run_list
assert "B" in run_list
def test_dag_default_outputs():
class MyLoader(Node):
def run(self, *args):
df = pd.DataFrame()
df["a"] = np.random.randn(10)
df["b"] = np.random.randn(10)
df["c"] = np.random.randn(10)
return df
class MyNode(Node):
def __init__(self, name):
self._name = name
def run(self, X, y):
return X + 1, y
class MyDAG(DAG):
def nodes(self):
return {
"input": Input(),
"loader": MyLoader(),
"primary": Primary(["a", "b"], "c"),
"item_filter_1": ItemFilter(lambda x: x["a"] < 3),
"item_filter_2": ItemFilter(
lambda x: (x["a"] >= 3) & (x["a"] < 10)
),
"my_node1": MyNode("A"),
"my_node2": MyNode("B"),
}
def edges(self):
return {
"primary": {"test": "input", "train": "loader"},
"item_filter_1": "primary",
"item_filter_2": "primary",
"my_node1": "item_filter_1",
"my_node2": "item_filter_2",
}
# During training, default outputs should be my_node1 and 2 (not Input)
my_dag = MyDAG()
my_dag.instantiate_dag("train")
def_outputs = my_dag.get_default_outputs("train")
assert isinstance(def_outputs, list)
assert len(def_outputs) == 2
assert "my_node1" in def_outputs
assert "my_node2" in def_outputs
# Same thing for test (output should not be loader)
my_dag = MyDAG()
my_dag.instantiate_dag("test")
def_outputs = my_dag.get_default_outputs("test")
assert isinstance(def_outputs, list)
assert len(def_outputs) == 2
assert "my_node1" in def_outputs
assert "my_node2" in def_outputs
def test_dag_eval_order_with_empty():
run_list = []
class MyNode(Node):
def __init__(self, name):
self._name = name
def run(self, X, y):
run_list.append(self._name)
return X + 1, y
class MyDAG(DAG):
def nodes(self):
return {
"input": Input(),
"primary": Primary(["a", "b"], "c"),
"item_filter_1": ItemFilter(lambda x: x["a"] < 3),
"item_filter_2": ItemFilter(
lambda x: (x["a"] >= 3) & (x["a"] < 10)
),
"my_node1": MyNode("A"),
"my_node2": MyNode("B"),
"collate": Collate(),
}
def edges(self):
return {
"primary": {"test": "input", "train": "input"},
"item_filter_1": "primary",
"item_filter_2": "primary",
"my_node1": "item_filter_1",
"my_node2": "item_filter_2",
"collate": ["my_node1", "my_node2"],
}
# Data split into two separate branches then recombined
df = pd.DataFrame()
df["a"] = [1, 2, 3, 4, 5, 6]
df["b"] = [10, 20, 30, 40, 50, 60]
df["c"] = [10, 20, 30, 40, 50, 60]
my_dag = MyDAG()
xo, yo = my_dag.run({"input": df})
assert len(run_list) == 2
assert "A" in run_list
assert "B" in run_list
assert isinstance(xo, pd.DataFrame)
assert xo.shape[0] == 6
assert xo.shape[1] == 2
assert xo["a"].iloc[0] == 2
assert xo["a"].iloc[1] == 3
assert xo["a"].iloc[2] == 4
assert xo["a"].iloc[3] == 5
assert xo["a"].iloc[4] == 6
assert xo["a"].iloc[5] == 7
assert xo["b"].iloc[0] == 11
assert xo["b"].iloc[1] == 21
assert xo["b"].iloc[2] == 31
assert xo["b"].iloc[3] == 41
assert xo["b"].iloc[4] == 51
assert xo["b"].iloc[5] == 61
# Reset the run list
while len(run_list) > 0:
run_list.pop()
# Data split into two separate branches but one is never executed
df = pd.DataFrame()
df["a"] = [1, 2, 1, 2, 1, 2]
df["b"] = [10, 20, 30, 40, 50, 60]
df["c"] = [10, 20, 30, 40, 50, 60]
my_dag = MyDAG()
xo, yo = my_dag.run({"input": df})
assert len(run_list) == 1
assert "A" in run_list
assert "B" not in run_list
assert isinstance(xo, pd.DataFrame)
assert xo.shape[0] == 6
assert xo.shape[1] == 2
assert xo["a"].iloc[0] == 2
assert xo["a"].iloc[1] == 3
assert xo["a"].iloc[2] == 2
assert xo["a"].iloc[3] == 3
assert xo["a"].iloc[4] == 2
assert xo["a"].iloc[5] == 3
assert xo["b"].iloc[0] == 11
assert xo["b"].iloc[1] == 21
assert xo["b"].iloc[2] == 31
assert xo["b"].iloc[3] == 41
assert xo["b"].iloc[4] == 51
assert xo["b"].iloc[5] == 61
# Reset the run list
while len(run_list) > 0:
run_list.pop()
# Same but now there's less data at the end
df = pd.DataFrame()
df["a"] = [1, 2, 1, 2, 10, 20]
df["b"] = [10, 20, 30, 40, 50, 60]
df["c"] = [10, 20, 30, 40, 50, 60]
my_dag = MyDAG()
xo, yo = my_dag.run({"input": df})
assert len(run_list) == 1
assert "A" in run_list
assert "B" not in run_list
assert isinstance(xo, pd.DataFrame)
assert xo.shape[0] == 4
assert xo.shape[1] == 2
assert xo["a"].iloc[0] == 2
assert xo["a"].iloc[1] == 3
assert xo["a"].iloc[2] == 2
assert xo["a"].iloc[3] == 3
assert xo["b"].iloc[0] == 11
assert xo["b"].iloc[1] == 21
assert xo["b"].iloc[2] == 31
assert xo["b"].iloc[3] == 41
def test_dag_get_and_save_html():
class MyLoader(Node):
def run(self, *args):
df = pd.DataFrame()
df["a"] = np.random.randn(10)
df["b"] = np.random.randn(10)
df["c"] = np.random.randn(10)
return df
class MyNode(Node):
def __init__(self, name):
self._name = name
def fit(self, X, y):
self.x_mean = X.mean()
def run(self, X, y):
return X + self.x_mean, y
class MyDAG(DAG):
def nodes(self):
return {
"input": Input(),
"loader": MyLoader(),
"primary": Primary(["a", "b"], "c"),
"my_node1": MyNode("A"),
"my_node2": MyNode("B"),
}
def edges(self):
return {
"primary": {"test": "input", "train": "loader"},
"my_node1": "primary",
"my_node2": "my_node1",
}
# Create the DAG
my_dag = MyDAG()
# Get the HTML
html = my_dag.get_html()
assert isinstance(html, str)
# Save the html
if os.path.exists("test_dag_viewer.html"):
os.remove("test_dag_viewer.html")
assert not os.path.exists("test_dag_viewer.html")
my_dag.save_html("test_dag_viewer.html")
assert os.path.exists("test_dag_viewer.html")
def test_cv_predict():
class MyLoader(Node):
def run(self, *args):
df = | pd.DataFrame() | pandas.DataFrame |
# Standard Library
import pandas as pd
import statistics as st
import numpy as np
import imdb
from datetime import datetime
from datetime import timedelta
import multiprocessing
import json
import time
import re
import random
import matplotlib.pyplot as plt
# Email Library
from email.mime.text import MIMEText as text
import smtplib
# Scraper Library
from bs4 import BeautifulSoup as soup # HTML data structure
from urllib.request import urlopen as uReq # Web client
from lxml import etree
# Twitter API Library
import tweepy
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
from tweepy import Stream
# Kafka Library
from kafka import KafkaProducer
from kafka import KafkaConsumer
# Pymongo Library
from pymongo import MongoClient
from pprint import pprint
# Pre-processing Library
from difflib import SequenceMatcher
import string
import unicodedata
import nltk
import contractions
import inflect
# Sentiment Analysis Library
from textblob import TextBlob
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import *
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
from google.oauth2 import service_account
# No Warning
pd.options.mode.chained_assignment = None
#! NOTE: html5lib must also be installed (it is used by the HTML parsers), though it never needs to be imported directly
#!! NOTE: NLTK must be fully set up beforehand, including downloading its data packages via nltk.download()
def movie_title():
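    # Prompts for a release calendar (2019 or 2020), scrapes the corresponding
    # firstshowing.net schedule page, and collects the release dates into the
    # movie_dates DataFrame.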
#selects yearly calendar
exit = 0
while exit != 1:
while True:
year_selected = str(input('Select which release calendar you want [2019/2020]: '))
if year_selected not in ("2019", "2020"):
print("Sorry, you selected a year out of range.")
continue
else:
break
while True:
print("You selected: "+year_selected+". Is that correct?")
yes_no = input('[y/n]: ')
if (yes_no == "y") or (yes_no == "n"):
break
else:
print("Sorry, you did not enter y or n.")
continue
while True:
if yes_no == "n":
is_looping = False
break
else:
exit = 1
break
print("Please wait...")
# URl to web scrap from.
page_url = "https://www.firstshowing.net/schedule"+year_selected
# opens the connection and downloads html page from url
uClient = uReq(page_url)
# parses html into a soup data structure to traverse html
# as if it were a json data type.
page_soup = soup(uClient.read(), "html.parser")
uClient.close()
# finds the container from the page
#containers = page_soup.findAll("p", {"class": "sched"})
containers = page_soup.findAll("div", {"class": "schedcontent"})
#How many movies are releasing from 20 dec to 17 jan? (variable)
#Create a dataframe which contains all movies release dates
movie_dates_list = []
datecontainer = page_soup.findAll("h4")
y=0
for container in datecontainer:
date = container.strong.text
y += 1
movie_dates_list.append([date])
movie_dates = | pd.DataFrame(movie_dates_list, columns=["dates"]) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
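# indexing a numpy array returns a zero-rank scalar, which should compare like a plain scalar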
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([0, 1, 2])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
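# smoke test: adding each offset in either order should not raise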
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
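# subtracting the timedelta Series from the Timestamp steps backwards through the dates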
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
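# a Series of mixed DateOffsets is applied element-wise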
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
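# smoke test: addition and subtraction with each offset, in both orders, should not raise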
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# timedelta64 Series operating with a timedelta scalar
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
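# td1 - td2 is -1 second for every element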
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = | Series([2, 3, 4]) | pandas.Series |