__init__.py
import os
import sys

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))

from .bin.aion_pipeline import aion_train_model
aion.py
import argparse
import sys
import os
import subprocess

INSTALL = 'install'
LINUXINSTALL = 'linuxinstall'
FE_MIGRATE = 'migrateappfe'
LAUNCH_KAFKA = 'launchkafkaconsumer'
RUN_LOCAL_MLAC_PIPELINE = 'runpipelinelocal'
BUILD_MLAC_CONTAINER = 'buildmlaccontainerlocal'
CONVERT_MODEL = 'convertmodel'
START_MLFLOW = 'mlflow'
COMMON_SERVICE = 'service'
TRAINING = 'training'
TRAINING_AWS = 'trainingonaws'
TRAINING_DISTRIBUTED = 'distributedtraining'
START_APPF = 'appfe'
ONLINE_TRAINING = 'onlinetraining'
TEXT_SUMMARIZATION = 'textsummarization'
GENERATE_MLAC = 'generatemlac'
AWS_TRAINING = 'awstraining'
LLAMA_7B_TUNING = 'llama7btuning'
LLM_PROMPT = 'llmprompt'
LLM_TUNING = 'llmtuning'
LLM_PUBLISH = 'llmpublish'
LLM_BENCHMARKING = 'llmbenchmarking'
TELEMETRY_PUSH = 'pushtelemetry'

def aion_aws_training(confFile):
    from hyperscalers.aion_aws_training import awsTraining
    status = awsTraining(confFile)
    print(status)

def aion_training(confFile):
    from bin.aion_pipeline import aion_train_model
    status = aion_train_model(confFile)
    print(status)

def aion_awstraining(config_file):
    from hyperscalers import aws_instance
    print(config_file)
    aws_instance.training(config_file)

def aion_generatemlac(ConfFile):
    from bin.aion_mlac import generate_mlac_code
    status = generate_mlac_code(ConfFile)
    print(status)

def aion_textsummarization(confFile):
    from bin.aion_text_summarizer import aion_textsummary
    status = aion_textsummary(confFile)

def aion_oltraining(confFile):
    from bin.aion_online_pipeline import aion_ot_train_model
    status = aion_ot_train_model(confFile)
    print(status)

def do_telemetry_sync():
    from appbe.telemetry import SyncTelemetry
    SyncTelemetry()

def aion_llm_publish(cloudconfig, instanceid, hypervisor, model, usecaseid, region, image):
    from llm.llm_inference import LLM_publish
    LLM_publish(cloudconfig, instanceid, hypervisor, model, usecaseid, region, image)

def aion_migratefe(operation):
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    argi = [os.path.abspath(__file__), operation]
    execute_from_command_line(argi)

def aion_appfe(url, port):
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfe.ux.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    argi = [os.path.abspath(__file__), 'runaion', "%s:%s" % (url, port)]
    execute_from_command_line(argi)

def aion_linux_install(version):
    from install import linux_dependencies
    linux_dependencies.process(version)

def aion_install(version):
    from install import dependencies
    dependencies.process(version)

def aion_service(ip, port, username, password):
    from bin.aion_service import start_server
    start_server(ip, port, username, password)

def aion_distributedLearning(confFile):
    from distributed_learning import learning
    learning.training(confFile)

def aion_launchkafkaconsumer():
    from mlops import kafka_consumer
    kafka_consumer.launch_kafka_consumer()

def aion_start_mlflow():
    from appbe.dataPath import DEPLOY_LOCATION
    import platform
    from os.path import expanduser
    mlflowpath = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'Scripts', 'mlflow.exe'))
    print(mlflowpath)
    home = expanduser("~")
    DEPLOY_LOCATION = os.path.join(DEPLOY_LOCATION, 'mlruns')
    if platform.system() == 'Windows':
        subprocess.Popen([sys.executable, mlflowpath, "ui", "--backend-store-uri", "file:///" + DEPLOY_LOCATION])
    else:
        subprocess.check_call(['mlflow', "ui", "-h", "0.0.0.0", "--backend-store-uri", "file:///" + DEPLOY_LOCATION])

def aion_model_conversion(config_file):
    from conversions import model_convertions
    model_convertions.convert(config_file)

def aion_model_buildMLaCContainer(config):
    from mlops import build_container
    build_container.local_docker_build(config)

def aion_model_runpipelinelocal(config):
    from mlops import local_pipeline
    local_pipeline.run_pipeline(config)

def aion_llm_tuning(config):
    from llm.llm_tuning import run
    run(config)

def aion_llm_prompt(cloudconfig, instanceid, prompt):
    from llm.aws_instance_api import LLM_predict
    LLM_predict(cloudconfig, instanceid, prompt)

def llm_bench_marking(hypervisor, instanceid, model, usecaseid, eval):
    print(eval)
    from llm.bench_marking import bench_mark
    bench_mark(hypervisor, instanceid, model, usecaseid, eval)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--configPath', help='Config File Path')
    parser.add_argument('-i', '--instanceid', help='instanceid')
    parser.add_argument('-hv', '--hypervisor', help='hypervisor')
    parser.add_argument('-md', '--model', help='model')
    parser.add_argument('-uc', '--usecase', help='usecase')
    parser.add_argument('-cc', '--cloudConfigPath', help='Cloud Config File Path')
    parser.add_argument('-m', '--module', help='MODULE=TRAINING, APPFE, ONLINETRAINING, DISTRIBUTEDTRAINING')
    parser.add_argument('-ip', '--ipaddress', help='URL applicable only for APPFE method')
    parser.add_argument('-p', '--port', help='APP Front End Port applicable only for APPFE method')
    parser.add_argument('-ac', '--appfecommand', help='APP Front End Command')
    parser.add_argument('-un', '--username', help="USERNAME")
    parser.add_argument('-passw', '--password', help="PASSWORD")
    parser.add_argument('-j', '--jsoninput', help='JSON Input')
    parser.add_argument('-v', '--version', help='Installer Version')
    parser.add_argument('-pf', '--prompt', help='Prompt File')
    parser.add_argument('-r', '--region', help='REGION NAME')
    parser.add_argument('-im', '--image', help='IMAGE NAME')
    parser.add_argument('-e', '--eval', help='evaluation for code or doc', default='doc')
    args = parser.parse_args()
    module = args.module.lower()
    if module == TRAINING:
        aion_training(args.configPath)
    elif module == TRAINING_AWS:
        aion_awstraining(args.configPath)
    elif module == TRAINING_DISTRIBUTED:
        aion_distributedLearning(args.configPath)
    elif module == START_APPF:
        aion_appfe(args.ipaddress, args.port)
    elif module == ONLINE_TRAINING:
        aion_oltraining(args.configPath)
    elif module == TEXT_SUMMARIZATION:
        aion_textsummarization(args.configPath)
    elif module == GENERATE_MLAC:
        aion_generatemlac(args.configPath)
    elif module == COMMON_SERVICE:
        aion_service(args.ipaddress, args.port, args.username, args.password)
    elif module == START_MLFLOW:
        aion_start_mlflow()
    elif module == CONVERT_MODEL:
        aion_model_conversion(args.configPath)
    elif module == BUILD_MLAC_CONTAINER:
        aion_model_buildMLaCContainer(args.jsoninput)
    elif module == RUN_LOCAL_MLAC_PIPELINE:
        aion_model_runpipelinelocal(args.jsoninput)
    elif module == LAUNCH_KAFKA:
        aion_launchkafkaconsumer()
    elif module == INSTALL:
        aion_install(args.version)
    elif module == LINUXINSTALL:
        aion_linux_install(args.version)
    elif module == FE_MIGRATE:
        aion_migratefe('makemigrations')
        aion_migratefe('migrate')
    elif module == AWS_TRAINING:
        aion_aws_training(args.configPath)
    elif module == LLAMA_7B_TUNING:
        aion_llm_tuning(args.configPath)
    elif module == LLM_TUNING:
        aion_llm_tuning(args.configPath)
    elif module == LLM_PROMPT:
        aion_llm_prompt(args.cloudConfigPath, args.instanceid, args.prompt)
    elif module == LLM_PUBLISH:
        aion_llm_publish(args.cloudConfigPath, args.instanceid, args.hypervisor, args.model, args.usecase, args.region, args.image)
    elif module == LLM_BENCHMARKING:
        llm_bench_marking(args.hypervisor, args.instanceid, args.model, args.usecase, args.eval)
    elif module == TELEMETRY_PUSH:
        do_telemetry_sync()
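For reference, a minimal sketch of how this dispatcher might be invoked. The flags and module keywords come from the argparse setup above; the config file name is a hypothetical placeholder.

# Hypothetical invocation of aion.py (the config path is a placeholder, not from the source).
import subprocess
import sys

# Run a local training job: maps to the TRAINING branch, which calls
# bin.aion_pipeline.aion_train_model with the given config file.
subprocess.check_call([sys.executable, 'aion.py', '-m', 'training', '-c', 'usecase_config.json'])

# Start the app front end: maps to the START_APPF branch (aion_appfe).
subprocess.check_call([sys.executable, 'aion.py', '-m', 'appfe', '-ip', '127.0.0.1', '-p', '8008'])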
aionMlopsService.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
# -*- coding: utf-8 -*-
import logging
logging.getLogger('tensorflow').disabled = True
import json
import mlflow
import mlflow.sklearn
import mlflow.sagemaker as mfs
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import time
import numpy as np
import pickle
import sys
import os
import boto3
import subprocess
import os.path
from os.path import expanduser
import platform
from pathlib import Path

class aionMlopsService:
    def __init__(self, model, mlflowtosagemakerDeploy, mlflowtosagemakerPushOnly, mlflowtosagemakerPushImageName,
                 mlflowtosagemakerdeployModeluri, experiment_name, mlflow_modelname, awsaccesskey_id,
                 awssecretaccess_key, aws_session_token, mlflow_container_name, aws_region, aws_id,
                 iam_sagemakerfullaccess_arn, sm_app_name, sm_deploy_option, delete_ecr_repository, ecrRepositoryName):
        try:
            self.model = model
            self.mlflowtosagemakerDeploy = mlflowtosagemakerDeploy
            self.mlflowtosagemakerPushOnly = str(mlflowtosagemakerPushOnly)
            self.mlflowtosagemakerPushImageName = str(mlflowtosagemakerPushImageName)
            self.mlflowtosagemakerdeployModeluri = str(mlflowtosagemakerdeployModeluri)
            self.experiment_name = experiment_name
            self.mlflow_modelname = mlflow_modelname
            self.awsaccesskey_id = awsaccesskey_id
            self.awssecretaccess_key = awssecretaccess_key
            self.aws_session_token = aws_session_token
            self.mlflow_container_name = mlflow_container_name
            self.aws_region = aws_region
            self.aws_id = aws_id
            self.iam_sagemakerfullaccess_arn = iam_sagemakerfullaccess_arn
            self.sm_app_name = sm_app_name
            self.sm_deploy_option = sm_deploy_option
            self.delete_ecr_repository = delete_ecr_repository
            self.ecrRepositoryName = ecrRepositoryName
            from appbe.dataPath import LOG_LOCATION
            sagemakerLogLocation = LOG_LOCATION
            try:
                os.makedirs(sagemakerLogLocation)
            except OSError:
                if os.path.exists(sagemakerLogLocation):
                    pass
                else:
                    raise OSError('sagemakerLogLocation error.')
            self.sagemakerLogLocation = str(sagemakerLogLocation)
            filename_mlops = 'mlopslog_' + str(int(time.time())) + '.log'
            filepath = os.path.join(self.sagemakerLogLocation, filename_mlops)
            logging.basicConfig(filename=filepath, format='%(message)s', filemode='w')
            self.log = logging.getLogger('aionMLOps')
            self.log.setLevel(logging.DEBUG)
        except Exception as e:
            self.log.info('<!------------- mlflow model INIT Error ---------------> ' + str(e))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))

    def mlflowSetPath(self, path):
        track_dir = os.path.join(path, 'mlruns')
        uri = "file:" + str(Path(track_dir))
        return uri

    # Currently unused: delete-ecr-repository option.
    def ecr_repository_delete(self, rep_name):
        client = boto3.client('ecr')
        repositories = client.describe_repositories()
        ecr_delete_rep = client.delete_repository(registryId=self.aws_id, repositoryName=self.ecrRepositoryName, force=True)
        mlflow_ecr_delete = subprocess.run(['aws', 'ecr', 'delete-repository', '--repository-name', rep_name, '||', 'true'])
        self.log.info('Success: deleted aws ecr repository which contains mlops image.')

    def check_sm_deploy_status(self, app_name):
        sage_client = boto3.client('sagemaker', region_name=self.aws_region)
        endpoint_description = sage_client.describe_endpoint(EndpointName=app_name)
        endpoint_status = endpoint_description["EndpointStatus"]
        try:
            failure_reason = endpoint_description["FailureReason"]
            self.log.info("sagemaker end point creation failure reason is: " + str(failure_reason))
        except:
            pass
        return str(endpoint_status)

    def invoke_sm_endpoint(self, app_name, input_json):
        client = boto3.session.Session().client("sagemaker-runtime", self.aws_region)
        response = client.invoke_endpoint(
            EndpointName=app_name,
            Body=input_json,
            ContentType='application/json; format=pandas-split',
        )
        preds = response['Body'].read().decode("ascii")
        preds = json.loads(preds)
        return preds

    def predict_sm_app_endpoint(self, X_test):
        import pandas as pd
        prediction = None
        AWS_ACCESS_KEY_ID = str(self.awsaccesskey_id)
        AWS_SECRET_ACCESS_KEY = str(self.awssecretaccess_key)
        AWS_SESSION_TOKEN = str(self.aws_session_token)
        region = str(self.aws_region)
        try:
            cmd = 'aws configure set region_name ' + region
            os.system(cmd)
            cmd = 'aws configure set aws_access_key_id ' + AWS_ACCESS_KEY_ID
            os.system(cmd)
            cmd = 'aws configure set aws_secret_access_key ' + AWS_SECRET_ACCESS_KEY
            os.system(cmd)
        except:
            pass
        # Create a session for aws communication using the boto3 lib.
        session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                                aws_session_token=AWS_SESSION_TOKEN, region_name=region)
        try:
            query_input = pd.DataFrame(X_test).to_json(orient="split")
            prediction = self.invoke_sm_endpoint(app_name=self.sm_app_name, input_json=query_input)
        except Exception as e:
            print(e)
        return prediction

    def deleteSagemakerApp(self, app_name, region):
        mfs.delete(app_name=app_name, region_name=region, archive=False, synchronous=True, timeout_seconds=300)
        self.log.info('AION mlops sagemaker application endpoint is deleted, application name is: ' + str(app_name))

    def deployModel2sagemaker(self, mlflow_container_name, tag_id, model_path):
        region = str(self.aws_region)
        aws_id = str(self.aws_id)
        iam_sagemakerfullaccess_arn = str(self.iam_sagemakerfullaccess_arn)
        app_name = str(self.sm_app_name)
        model_uri = str(model_path)
        app_status = False
        mlflow_root_dir = None
        try:
            os.chdir(str(self.sagemakerLogLocation))
            mlflow_root_dir = os.getcwd()
            self.log.info('mlflow root dir: ' + str(mlflow_root_dir))
        except:
            self.log.info("path issue.")
        try:
            c_status = self.check_sm_deploy_status(app_name)
            if (c_status == "Failed") or (c_status.lower() == "failed"):
                app_status = False
                self.log.info("Sagemaker endpoint status: Failed.\n")
                mfs.delete(app_name=app_name, region_name=region, archive=False, synchronous=True, timeout_seconds=300)
            elif (c_status.lower() == "inservice") or (c_status == "InService"):
                app_status = True
                self.log.info("Sagemaker endpoint status: InService. Running sagemaker endpoint name: \n" + str(app_name))
            else:
                app_status = False
        except:
            pass
        # aws ecr model app_name should contain only [a-zA-Z0-9-]
        import re
        if app_name:
            pattern = re.compile("[A-Za-z0-9-]+")
            if pattern.fullmatch(app_name) is None:
                app_name = 'aion-demo-app'
        else:
            app_name = 'aion-demo-app'
        mlflow_image = mlflow_container_name + ':' + tag_id
        image_url = aws_id + '.dkr.ecr.' + region + '.amazonaws.com/' + mlflow_image
        deploy_option = "create"
        self.log.info('deploy_option: \n' + str(deploy_option))
        if deploy_option.lower() == "create":
            # Other deploy modes: mlflow.sagemaker.DEPLOYMENT_MODE_ADD, mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE
            if not app_status:
                try:
                    mfs.deploy(app_name=app_name, model_uri=model_uri, region_name=region, mode="create",
                               execution_role_arn=iam_sagemakerfullaccess_arn, image_url=image_url)
                    self.log.info('sagemaker endpoint created and model deployed. Application name is: \n' + str(app_name))
                except:
                    self.log.info('Creating end point application issue. Please check the connection and aws credentials \n')
            else:
                self.log.info('Sagemaker application with user endpoint name already running. Please delete the old endpoint with the same name.\n')
        elif deploy_option.lower() in ("delete", "add", "replace"):
            pass
        return app_status

    def mlflow2sagemaker_deploy(self):
        self.log.info('<!------------- Inside AION mlops to sagemaker communication and deploy process. ---------------> ')
        deploy_status = False
        app_name = str(self.sm_app_name)
        self.log.info('Sagemaker Application Name: ' + str(app_name))
        uri_mlflow = self.mlflowSetPath(self.sagemakerLogLocation)
        mlflow.set_tracking_uri(uri_mlflow)
        mlops_trackuri = str(mlflow.get_tracking_uri())
        self.log.info('mlops tracking uri: ' + str(mlops_trackuri))
        localhost_deploy = False
        try:
            # Loading aion model to deploy in sagemaker.
            mlflow.set_experiment(self.experiment_name)
            self.log.info('Endpoint Name: ' + str(self.experiment_name))
            # The model is assumed to be already loaded (e.g. from joblib) by the caller.
            aionmodel2deploy = self.model
            with mlflow.start_run(run_name='AIONMLOps') as run:
                mlflow.sklearn.log_model(aionmodel2deploy, self.mlflow_modelname)
                run_id = run.info.run_uuid
                experiment_id = run.info.experiment_id
                self.log.info('AION mlops experiment run_id: ' + str(run_id))
                self.log.info('AION mlops experiment experiment_id: ' + str(experiment_id))
                self.log.info('AION mlops experiment model_name: ' + str(self.mlflow_modelname))
                artifact_uri = {mlflow.get_artifact_uri()}
            mlflow.end_run()
            mlflow_modelname = str(self.mlflow_modelname)
            mlops_trackuri = str(mlops_trackuri.replace('file:', ''))
            mlflow_root_dir = None
            try:
                os.chdir(str(self.sagemakerLogLocation))
                mlflow_root_dir = os.getcwd()
                self.log.info('mlflow root dir: ' + str(mlflow_root_dir))
            except:
                self.log.info("path issue.")
            model_path = 'mlruns/%s/%s/artifacts/%s' % (experiment_id, run_id, self.mlflow_modelname)
            self.log.info("local host aion mlops model_path is: " + str(model_path))
            time.sleep(2)
            self.log.info('Environment variable setup in the current working dir for aws sagemaker cli connection... \n')
            AWS_ACCESS_KEY_ID = str(self.awsaccesskey_id)
            AWS_SECRET_ACCESS_KEY = str(self.awssecretaccess_key)
            AWS_SESSION_TOKEN = str(self.aws_session_token)
            region = str(self.aws_region)
            # Existing model deploy options.
            mlflowtosagemakerPushImageName = str(self.mlflowtosagemakerPushImageName)
            mlflowtosagemakerdeployModeluri = str(self.mlflowtosagemakerdeployModeluri)
            cmd = 'aws configure set region_name ' + region
            os.system(cmd)
            cmd = 'aws configure set aws_access_key_id ' + AWS_ACCESS_KEY_ID
            os.system(cmd)
            cmd = 'aws configure set aws_secret_access_key ' + AWS_SECRET_ACCESS_KEY
            os.system(cmd)
            # Create a session for aws communication using the boto3 lib.
            session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                                    aws_session_token=AWS_SESSION_TOKEN, region_name=region)
            self.log.info('aws environment variable setup done... \n')
            try:
                os.chdir(mlflow_root_dir)
            except FileNotFoundError:
                self.log.info('Directory does not exist. ' + str(mlflow_root_dir))
            except NotADirectoryError:
                self.log.info('model_path is not a directory. ' + str(mlflow_root_dir))
            except PermissionError:
                self.log.info('Issue in permissions to change to model dir. ' + str(mlflow_root_dir))
            mlflow_container_name = str(self.mlflow_container_name)
            mlflow_version = mlflow.__version__
            tag_id = mlflow_version
            if self.mlflowtosagemakerPushOnly.lower() == "true":
                self.log.info('Selected option is <Deploy existing model to sagemaker> \n')
                aws_id = str(self.aws_id)
                arn = str(self.iam_sagemakerfullaccess_arn)
                mlflow_image = mlflow_container_name + ':' + tag_id
                image_url = aws_id + '.dkr.ecr.' + region + '.amazonaws.com/' + mlflow_image
                deploy_status = True
                try:
                    model_path = mlflowtosagemakerdeployModeluri
                    # The mlflow docker container command must be run in the artifacts->model directory inside mlruns.
                    self.log.info('Deploy existing model container-Model path given by user: ' + str(model_path))
                    try:
                        os.chdir(model_path)
                    except FileNotFoundError:
                        self.log.info('Directory does not exist. ' + str(model_path))
                    except NotADirectoryError:
                        self.log.info('model_path is not a directory. ' + str(model_path))
                    except PermissionError:
                        self.log.info('Issue in permissions to change to model dir. ' + str(model_path))
                    try:
                        mfs.push_image_to_ecr(image=mlflowtosagemakerPushImageName)
                        deploy_status = True
                        self.log.info('AION mlops pushed the docker container to aws ecr. \n ')
                    except:
                        self.log.info("error in pushing existing container to ecr.\n")
                        deploy_status = False
                    time.sleep(2)
                    # Change the working dir back to the root dir; deploy needs the full mlruns-to-model-name dir.
                    try:
                        os.chdir(mlflow_root_dir)
                    except FileNotFoundError:
                        self.log.info('model path is not a directory. ' + str(mlflow_root_dir))
                    except NotADirectoryError:
                        self.log.info('model path is not a directory. ' + str(mlflow_root_dir))
                    except PermissionError:
                        self.log.info('Issue in permissions to change to model dir. ' + str(mlflow_root_dir))
                    try:
                        if deploy_status:
                            self.deployModel2sagemaker(mlflowtosagemakerPushImageName, tag_id, mlflowtosagemakerdeployModeluri)
                            self.log.info('AION creates docker container and pushes the container into aws ecr.. ')
                            time.sleep(2)
                    except:
                        self.log.info('AION deploy error. Check connection and aws config parameters. ')
                        deploy_status = False
                except Exception as e:
                    self.log.info('AION mlops failed to push docker container in aws ecr, check configuration parameters. \n' + str(e))
            elif self.mlflowtosagemakerPushOnly.lower() == "false":
                if self.mlflowtosagemakerDeploy.lower() == "true":
                    self.log.info('Selected option is <Create and Deploy model> \n')
                    deploy_status = True
                    try:
                        # The mlflow docker container command must be run in the artifacts->model directory inside mlruns.
                        try:
                            os.chdir(model_path)
                        except FileNotFoundError:
                            self.log.info('Directory does not exist. ' + str(model_path))
                        except NotADirectoryError:
                            self.log.info('model_path is not a directory. ' + str(model_path))
                        except PermissionError:
                            self.log.info('Issue in permissions to change to model dir. ' + str(model_path))
                        try:
                            mlflow_container_push = subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container', '--build', '--push', '--container', mlflow_container_name])
                            self.log.info('AION mlops creates docker container and pushes the container into aws ecr.. ')
                            deploy_status = True
                            time.sleep(2)
                        except:
                            self.log.info('error in pushing aion model container to sagemaker, please check the connection between local host to aws server.')
                            deploy_status = False
                        self.log.info('Now deploying the model container to sagemaker starts....\n ')
                        # Once the docker push completes, go back to the mlflow parent dir for deployment.
                        try:
                            os.chdir(mlflow_root_dir)
                        except FileNotFoundError:
                            self.log.info('model_path does not exist. ' + str(mlflow_root_dir))
                        except NotADirectoryError:
                            self.log.info('model_path is not a directory. ' + str(mlflow_root_dir))
                        except PermissionError:
                            self.log.info('Issue in permissions to change to model dir. ' + str(mlflow_root_dir))
                        try:
                            if deploy_status:
                                self.deployModel2sagemaker(mlflow_container_name, tag_id, model_path)
                        except:
                            self.log.info('mlops deploy error. Check connection')
                            deploy_status = False
                    except Exception as e:
                        exc = {"status": "FAIL", "message": str(e).strip('"')}
                        out_exc = json.dumps(exc)
                        self.log.info('mlflow failed to create docker container; please check the aws iam/ecr permission setup, aws id, access_key and secret key values for aion.\n')
                elif self.mlflowtosagemakerDeploy.lower() == "false":
                    deploy_status = False
                    localhost_deploy = True
                    self.log.info('Selected option is <Create AION mlops container in local host .> \n')
                    self.log.info("User selected create-Deploy sagemaker option as False,")
                    self.log.info("Creates the AION mlops-sagemaker container locally, but doesn't push into aws ecr and deploy in sagemaker. Check the container in docker repository. ")
                    try:
                        # The AION mlops docker container command must be run in the artifacts->model directory inside mlruns.
                        try:
                            os.chdir(model_path)
                            self.log.info('After change to AION mlops model dir, cwd: ' + str(model_path))
                        except FileNotFoundError:
                            self.log.info('Directory does not exist. ' + str(model_path))
                        except NotADirectoryError:
                            self.log.info('model_path is not a directory. ' + str(model_path))
                        except PermissionError:
                            self.log.info('Issue in permissions to change to model dir. ' + str(model_path))
                        try:
                            if not deploy_status:
                                mlflow_container_local = subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container', '--build', '--no-push', '--container', mlflow_container_name])
                                self.log.info('AION creates local host based docker container and pushes the container to the local docker repository. Check with <docker images> command.\n ')
                                localhost_deploy = True
                                time.sleep(2)
                        except:
                            self.log.info('error in pushing aion model container to sagemaker, please check the connection between local host to aws server.')
                            deploy_status = False
                            localhost_deploy = False
                        self.log.info('AION mlops creates docker container and stores it locally... ')
                        time.sleep(2)
                    except Exception as e:
                        localhost_deploy = False
                        self.log.info('AION mlops failed to create docker container in local machine.\n' + str(e))
                else:
                    self.log.info('Deploy option not selected, Please check. ')
                    localhost_deploy = False
                    deploy_status = False
            else:
                pass
            localhost_container_status = "Notdeployed"
            mlflow2sm_deploy_status = "Notdeployed"
            if localhost_deploy:
                localhost_container_status = "success"
                mlflow2sm_deploy_status = "Notdeployed"
                self.log.info("AION creates local docker container successfully. Please check in docker repository.")
            if deploy_status:
                # Finally, check whether the mlops model is deployed to sagemaker or not.
                app_name = str(self.sm_app_name)
                deploy_s = self.check_sm_deploy_status(app_name)
                if deploy_s == "InService":
                    self.log.info('AION mlops model is deployed at aws sagemaker, use application name(app_name) and region to access.\n' + str(app_name))
                    mlflow2sm_deploy_status = "success"
                    localhost_container_status = "Notdeployed"
                else:
                    self.log.info('AION mlops model not able to deploy at aws sagemaker.\n')
                    mlflow2sm_deploy_status = "failed"
                    localhost_container_status = "Notdeployed"
            return mlflow2sm_deploy_status, localhost_container_status
        except Exception as inst:
            exc = {"status": "FAIL", "message": str(inst).strip('"')}
            out_exc = json.dumps(exc)
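For reference, a minimal sketch of how this class might be wired together: the constructor captures the model plus the AWS/MLflow settings, and mlflow2sagemaker_deploy() logs the model to MLflow, builds and pushes the container, and optionally deploys it to a SageMaker endpoint. All credential, ARN, image and model-path values below are hypothetical placeholders; 'ap-south-1' and 'aion-demo-app' are only example values that appear in the source's comments.

# A minimal usage sketch (all placeholder values are hypothetical, not from the source).
import joblib

model = joblib.load('model.sav')  # hypothetical pre-trained sklearn model
svc = aionMlopsService(
    model=model,
    mlflowtosagemakerDeploy='True',      # build, push to ECR and deploy to SageMaker
    mlflowtosagemakerPushOnly='False',   # 'True' would only push an existing image
    mlflowtosagemakerPushImageName='mlflow-pyfunc',
    mlflowtosagemakerdeployModeluri='mlruns/0/<run_id>/artifacts/model',
    experiment_name='aion-demo',
    mlflow_modelname='aion-demo-model',
    awsaccesskey_id='<AWS_ACCESS_KEY_ID>',
    awssecretaccess_key='<AWS_SECRET_ACCESS_KEY>',
    aws_session_token='<AWS_SESSION_TOKEN>',
    mlflow_container_name='mlflow-pyfunc',
    aws_region='ap-south-1',
    aws_id='<AWS_ACCOUNT_ID>',
    iam_sagemakerfullaccess_arn='arn:aws:iam::<AWS_ACCOUNT_ID>:role/<SageMakerRole>',
    sm_app_name='aion-demo-app',
    sm_deploy_option='create',
    delete_ecr_repository='False',
    ecrRepositoryName='aion-mlops',
)
deploy_status, local_status = svc.mlflow2sagemaker_deploy()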
__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
aion_pipeline.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import json
import datetime, time, timeit
import argparse
import logging
logging.getLogger('tensorflow').disabled = True
import math
import shutil
import re
from datetime import datetime as dt
import warnings
from config_manager.pipeline_config import AionConfigManager
import pandas as pd
import numpy as np
import sklearn
import string
from records import pushrecords
from pathlib import Path
from pytz import timezone
from config_manager.config_gen import code_configure
import joblib
from sklearn.model_selection import train_test_split
from config_manager.check_config import config_validate
from utils.file_ops import save_csv_compressed, save_csv, save_chromadb

LOG_FILE_NAME = 'model_training_logs.log'

if 'AION' in sys.modules:
    try:
        from appbe.app_config import DEBUG_ENABLED
    except:
        DEBUG_ENABLED = False
else:
    DEBUG_ENABLED = True

def getversion():
    configFolder = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config')
    version = 'NA'
    for file in os.listdir(configFolder):
        if file.endswith(".var"):
            version = file.rsplit('.', 1)
            version = version[0]
            break
    return version

AION_VERSION = getversion()

def pushRecordForTraining():
    try:
        status, msg = pushrecords.enterRecord(AION_VERSION)
    except Exception as e:
        print("Exception", e)
        status = False
        msg = str(e)
    return status, msg

def mlflowSetPath(path, experimentname):
    import mlflow
    url = "file:" + str(Path(path).parent.parent) + "/mlruns"
    mlflow.set_tracking_uri(url)
    mlflow.set_experiment(str(experimentname))

def set_log_handler(basic, mode='w'):
    deploy_loc = Path(basic.get('deployLocation'))
    log_file_parent = deploy_loc / basic['modelName'] / basic['modelVersion'] / 'log'
    log_file_parent.mkdir(parents=True, exist_ok=True)
    log_file = log_file_parent / LOG_FILE_NAME
    filehandler = logging.FileHandler(log_file, mode, 'utf-8')
    formatter = logging.Formatter('%(message)s')
    filehandler.setFormatter(formatter)
    log = logging.getLogger('eion')
    log.propagate = False
    for hdlr in log.handlers[:]:  # remove the existing file handlers
        if isinstance(hdlr, logging.FileHandler):
            log.removeHandler(hdlr)
    log.addHandler(filehandler)
    log.setLevel(logging.INFO)
    return log

class server():
    def __init__(self):
        self.response = None
        self.features = []
        self.mFeatures = []
        self.emptyFeatures = []
        self.textFeatures = []
        self.vectorizerFeatures = []
        self.wordToNumericFeatures = []
        self.profilerAction = []
        self.targetType = ''
        self.matrix1 = '{'
        self.matrix2 = '{'
        self.matrix = '{'
        self.trainmatrix = '{'
        self.numericalFeatures = []
        self.nonNumericFeatures = []
        self.similarGroups = []
        self.dfcols = 0
        self.dfrows = 0
        self.method = 'NA'
        self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
        self.modelSelTopFeatures = []
        self.topFeatures = []
        self.allFeatures = []

    def startScriptExecution(self, config_obj, codeConfigure, log):
        oldStdout = sys.stdout
        model_training_details = ''
        model_tried = ''
        learner_type = ''
        topics = {}
        pred_filename = ''
        numericContinuousFeatures = ''
        discreteFeatures = ''
        sessonal_freq = ''
        additional_regressors = ''
        threshold = -1
        targetColumn = ''
        numericalFeatures = ''
        nonNumericFeatures = ''
        categoricalFeatures = ''
        dataFolderLocation = ''
        featureReduction = 'False'
        original_data_file = ''
        normalizer_pickle_file = ''
        pcaModel_pickle_file = ''
        bpca_features = []
        apca_features = []
        lag_order = 1
        profiled_data_file = ''
        trained_data_file = ''
        predicted_data_file = ''
        dictDiffCount = {}
        cleaning_kwargs = {}
        grouperbyjson = ''
        rowfilterexpression = ''
        featureEngineeringSelector = 'false'
        conversion_method = ''
        params = {}
        loss_matrix = 'binary_crossentropy'
        optimizer = 'Nadam'
        numericToLabel_json = '[]'
        preprocessing_pipe = ''
        firstDocFeature = ''
        secondDocFeature = ''
        padding_length = 30
        pipe = None
        scalertransformationFile = None
        column_merge_flag = False
        merge_columns = []
        score = 0
        profilerObj = None
        imageconfig = ''
        labelMaps = {}
        featureDataShape = []
        normFeatures = []
        preprocess_out_columns = []
        preprocess_pipe = None
        label_encoder = None
        unpreprocessed_columns = []
        import pickle
        iterName, iterVersion, dataLocation, deployLocation, delimiter, textqualifier = config_obj.getAIONLocationSettings()
        inlierLabels = config_obj.getEionInliers()
        scoreParam = config_obj.getScoringCreteria()
        noofforecasts = config_obj.getNumberofForecasts()
        datetimeFeature, indexFeature, modelFeatures = config_obj.getFeatures()
        filter_expression = config_obj.getFilterExpression()
        refined_filter_expression = ""
        sa_images = []
        model_tried = ''
        deploy_config = {}
        iterName = iterName.replace(" ", "_")
        deployFolder = deployLocation
        usecaseLocation, deployLocation, dataFolderLocation, imageFolderLocation, original_data_file, profiled_data_file, trained_data_file, predicted_data_file, logFileName, outputjsonFile, reduction_data_file = config_obj.createDeploymentFolders(deployFolder, iterName, iterVersion)
        outputLocation = deployLocation
        mlflowSetPath(deployLocation, iterName + '_' + iterVersion)
        # mlflowSetPath shuts down the logger, so set it again
        set_log_handler(config_obj.basic, mode='a')
        xtrain = pd.DataFrame()
        xtest = pd.DataFrame()
        log.info('Status:-|... AION Training Configuration started')
        startTime = timeit.default_timer()
        try:
            output = {'bestModel': '', 'bestScore': 0, 'bestParams': {}}
            problem_type, targetFeature, profiler_status, selector_status, learner_status, deeplearner_status, timeseriesStatus, textsummarizationStatus, survival_analysis_status, textSimilarityStatus, inputDriftStatus, outputDriftStatus, recommenderStatus, visualizationstatus, deploy_status, associationRuleStatus, imageClassificationStatus, forecastingStatus, objectDetectionStatus, stateTransitionStatus, similarityIdentificationStatus, contextualSearchStatus, anomalyDetectionStatus = config_obj.getModulesDetails()
            status, error_id, msg = config_obj.validate_config()
            if not status:
                if error_id == 'fasttext':
                    raise ValueError(msg)
            VideoProcessing = False
            if problem_type.lower() in ['classification', 'regression']:
                if targetFeature == '':
                    output = {"status": "FAIL", "message": "Target Feature is Must for Classification and Regression Problem Type"}
                    return output
            from transformations.dataReader import dataReader
            objData = dataReader()
            DataIsFolder = False
            folderdetails = config_obj.getFolderSettings()
            if os.path.isfile(dataLocation):
                log.info('Status:-|... AION Loading Data')
                dataFrame = objData.csvTodf(dataLocation, delimiter, textqualifier)
                status, msg = save_csv_compressed(dataFrame, original_data_file)
                if not status:
                    log.info('CSV File Error: ' + str(msg))
            elif os.path.isdir(dataLocation):
                if problem_type.lower() == 'summarization':
                    from document_summarizer import summarize
                    keywords, pretrained_type, embedding_sz = summarize.get_params()
                    dataFrame = summarize.to_dataframe(dataLocation, keywords, deploy_loc, pretrained_type, embedding_sz)
                    problem_type = 'classification'
                    targetFeature = 'label'
                    scoreParam = 'Accuracy'
                elif folderdetails['fileType'].lower() == 'document':
                    dataFrame, error = objData.documentsTodf(dataLocation, folderdetails['labelDataFile'])
                    if error:
                        log.info(error)
                elif folderdetails['fileType'].lower() == 'object':
                    testPercentage = config_obj.getAIONTestTrainPercentage()  # Unnati
                    intermediateLocation = os.path.join(deployLocation, 'intermediate')
                    os.mkdir(intermediateLocation)
                    AugEnabled, keepAugImages, operations, augConf = config_obj.getEionImageAugmentationConfiguration()
                    dataFrame, n_class = objData.createTFRecord(dataLocation, intermediateLocation, folderdetails['labelDataFile'], testPercentage, AugEnabled, keepAugImages, operations, "objectdetection", augConf)  # Unnati
                    DataIsFolder = True
                else:
                    datafilelocation = os.path.join(dataLocation, folderdetails['labelDataFile'])
                    dataFrame = objData.csvTodf(datafilelocation, delimiter, textqualifier)
                    DataIsFolder = True
            if textSimilarityStatus or similarityIdentificationStatus or contextualSearchStatus:
                similaritydf = dataFrame
            filter = config_obj.getfilter()
            if filter != 'NA':
                dataFrame, rowfilterexpression = objData.rowsfilter(filter, dataFrame)
            timegrouper = config_obj.gettimegrouper()
            grouping = config_obj.getgrouper()
            if grouping != 'NA':
                dataFrame, grouperbyjson = objData.grouping(grouping, dataFrame)
            elif timegrouper != 'NA':
                dataFrame, grouperbyjson = objData.timeGrouping(timegrouper, dataFrame)
            if timeseriesStatus or anomalyDetectionStatus:
                from utils.validate_inputs import dataGarbageValue
                status, msg = dataGarbageValue(dataFrame, datetimeFeature)
                if status.lower() == 'error':
                    raise ValueError(msg)
            if not DataIsFolder:
                if timeseriesStatus:
                    if modelFeatures != 'NA' and datetimeFeature != '':
                        if datetimeFeature:
                            if isinstance(datetimeFeature, list):
                                # to handle time series having multiple time columns
                                unpreprocessed_columns = unpreprocessed_columns + datetimeFeature
                            else:
                                unpreprocessed_columns = unpreprocessed_columns + datetimeFeature.split(',')
                            if datetimeFeature not in modelFeatures:
                                modelFeatures = modelFeatures + ',' + datetimeFeature
                    dataFrame = objData.removeFeatures(dataFrame, 'NA', indexFeature, modelFeatures, targetFeature)
                elif survival_analysis_status or anomalyDetectionStatus:
                    if modelFeatures != 'NA':
                        if datetimeFeature != 'NA' and datetimeFeature != '':
                            unpreprocessed_columns = unpreprocessed_columns + datetimeFeature.split(',')
                            if datetimeFeature not in modelFeatures:
                                modelFeatures = modelFeatures + ',' + datetimeFeature
                    dataFrame = objData.removeFeatures(dataFrame, 'NA', indexFeature, modelFeatures, targetFeature)
                else:
                    dataFrame = objData.removeFeatures(dataFrame, datetimeFeature, indexFeature, modelFeatures, targetFeature)
            log.info('\n-------> First Ten Rows of Input Data: ')
            log.info(dataFrame.head(10))
            self.dfrows = dataFrame.shape[0]
            self.dfcols = dataFrame.shape[1]
            log.info('\n-------> Rows: ' + str(self.dfrows))
            log.info('\n-------> Columns: ' + str(self.dfcols))
            topFeatures = []
            profilerObj = None
            normalizer = None
            dataLoadTime = timeit.default_timer() - startTime
            log.info('-------> COMPUTING: Total dataLoadTime time(sec) :' + str(dataLoadTime))
            if timeseriesStatus:
                if datetimeFeature != 'NA' and datetimeFeature != '':
                    preproces_config = config_obj.basic.get('preprocessing', {}).get('timeSeriesForecasting', {})
                    if preproces_config:
                        from transformations.preprocess import timeSeries as ts_preprocess
                        preprocess_obj = ts_preprocess(preproces_config, datetimeFeature, log)
                        dataFrame = preprocess_obj.run(dataFrame)
                        log.info('-------> Input dataFrame(5 Rows) after preprocessing: ')
                        log.info(dataFrame.head(5))
                        deploy_config['preprocess'] = {}
                        deploy_config['preprocess']['code'] = preprocess_obj.get_code()
            if profiler_status:
                log.info('\n================== Data Profiler has started ==================')
                log.info('Status:-|... AION feature transformation started')
                from transformations.dataProfiler import profiler as dataProfiler
                dp_mlstart = time.time()
                profilerJson = config_obj.getEionProfilerConfigurarion()
                log.info('-------> Input dataFrame(5 Rows): ')
                log.info(dataFrame.head(5))
                log.info('-------> DataFrame Shape (Row,Columns): ' + str(dataFrame.shape))
                testPercentage = config_obj.getAIONTestTrainPercentage()  # Unnati
                if DataIsFolder:
                    if folderdetails['type'].lower() != 'objectdetection':
                        profilerObj = dataProfiler(dataFrame)
                        topFeatures, VideoProcessing, tfrecord_directory = profilerObj.folderPreprocessing(dataLocation, folderdetails, deployLocation)
                elif textSimilarityStatus:
                    firstDocFeature = config_obj.getFirstDocumentFeature()
                    secondDocFeature = config_obj.getSecondDocumentFeature()
                    profilerObj = dataProfiler(dataFrame, targetFeature, data_path=dataFolderLocation)
                    dataFrame, pipe, targetColumn, topFeatures = profilerObj.textSimilarityStartProfiler(firstDocFeature, secondDocFeature)
                elif recommenderStatus:
                    profilerObj = dataProfiler(dataFrame)
                    dataFrame = profilerObj.recommenderStartProfiler(modelFeatures)
                else:
                    if deeplearner_status or learner_status:
                        if (problem_type.lower() != 'clustering') and (problem_type.lower() != 'topicmodelling'):
                            if targetFeature != '':
                                try:
                                    biasingDetail = config_obj.getDebiasingDetail()
                                    if len(biasingDetail) > 0:
                                        if biasingDetail['FeatureName'] != 'None':
                                            protected_feature = biasingDetail['FeatureName']
                                            privileged_className = biasingDetail['ClassName']
                                            target_feature = biasingDetail['TargetFeature']
                                            algorithm = biasingDetail['Algorithm']
                                            from debiasing.DebiasingManager import DebiasingManager
                                            mgrObj = DebiasingManager()
                                            log.info('Status:-|... Debiasing transformation started')
                                            transf_dataFrame = mgrObj.Bias_Mitigate(dataFrame, protected_feature, privileged_className, target_feature, algorithm)
                                            log.info('Status:-|... Debiasing transformation completed')
                                            dataFrame = transf_dataFrame
                                except Exception as e:
                                    print(e)
                                # ----------------------------------------------
                                targetData = dataFrame[targetFeature]
                                featureData = dataFrame[dataFrame.columns.difference([targetFeature])]
                                testPercentage = config_obj.getAIONTestTrainPercentage()  # Unnati
                                xtrain, ytrain, xtest, ytest = self.split_into_train_test_data(featureData, targetData, testPercentage, log, problem_type.lower())
                                xtrain.reset_index(drop=True, inplace=True)
                                ytrain.reset_index(drop=True, inplace=True)
                                xtest.reset_index(drop=True, inplace=True)
                                ytest.reset_index(drop=True, inplace=True)
                                dataFrame = xtrain
                                dataFrame[targetFeature] = ytrain
                    encode_target_problems = ['classification', 'anomalyDetection', 'timeSeriesAnomalyDetection']  # task 11997
                    if problem_type == 'survivalAnalysis' and dataFrame[targetFeature].nunique() > 1:
                        encode_target_problems.append('survivalAnalysis')
                    if timeseriesStatus:
                        # task 12627: call the data profiler without the target feature specified separately,
                        # i.e. profiling is done for model features along with target features
                        profilerObj = dataProfiler(dataFrame, config=profilerJson, keep_unprocessed=unpreprocessed_columns.copy(), data_path=dataFolderLocation)
                    else:
                        profilerObj = dataProfiler(dataFrame, target=targetFeature, encode_target=problem_type in encode_target_problems, config=profilerJson, keep_unprocessed=unpreprocessed_columns.copy(), data_path=dataFolderLocation)  # task 12627
                    dataFrame, preprocess_pipe, label_encoder = profilerObj.transform()
                    preprocess_out_columns = dataFrame.columns.tolist()
                    if not timeseriesStatus:
                        # task 12627: preprocess_out_columns goes as output_columns in target folder script/input_profiler.py;
                        # for forecasting it should also contain the target feature, as that is what is forecast
                        if targetFeature in preprocess_out_columns:
                            preprocess_out_columns.remove(targetFeature)
                    for x in unpreprocessed_columns:
                        preprocess_out_columns.remove(x)
                    if label_encoder:
                        joblib.dump(label_encoder, Path(deployLocation) / 'model' / 'label_encoder.pkl')
                        labelMaps = dict(zip(label_encoder.classes_, label_encoder.transform(label_encoder.classes_)))
                    codeConfigure.update_config('train_features', list(profilerObj.train_features_type.keys()))
                    codeConfigure.update_config('text_features', profilerObj.text_feature)
                    self.textFeatures = profilerObj.text_feature
                    deploy_config['profiler'] = {}
                    deploy_config['profiler']['input_features'] = list(profilerObj.train_features_type.keys())
                    deploy_config['profiler']['output_features'] = preprocess_out_columns
                    deploy_config['profiler']['input_features_type'] = profilerObj.train_features_type
                    deploy_config['profiler']['word2num_features'] = profilerObj.wordToNumericFeatures
                    deploy_config['profiler']['unpreprocessed_columns'] = unpreprocessed_columns
                    deploy_config['profiler']['force_numeric_conv'] = profilerObj.force_numeric_conv
                    if self.textFeatures:
                        deploy_config['profiler']['conversion_method'] = config_obj.get_conversion_method()
                    if anomalyDetectionStatus and datetimeFeature != 'NA' and datetimeFeature != '':
                        if unpreprocessed_columns:
                            dataFrame.set_index(unpreprocessed_columns[0], inplace=True)
                    log.info('-------> Data Frame Post Data Profiling(5 Rows): ')
                    log.info(dataFrame.head(5))
                    if not xtest.empty:
                        if targetFeature != '':
                            non_null_index = ytest.notna()
                            ytest = ytest[non_null_index]
                            xtest = xtest[non_null_index]
                        if profilerObj.force_numeric_conv:
                            xtest[profilerObj.force_numeric_conv] = xtest[profilerObj.force_numeric_conv].apply(pd.to_numeric, errors='coerce')
                        xtest.astype(profilerObj.train_features_type)
                        if unpreprocessed_columns:
                            xtest_unprocessed = xtest[unpreprocessed_columns]
                        xtest = preprocess_pipe.transform(xtest)
                        if not isinstance(xtest, np.ndarray):
                            xtest = xtest.toarray()
                        xtest = pd.DataFrame(xtest, columns=preprocess_out_columns)
                        if unpreprocessed_columns:
                            xtest[unpreprocessed_columns] = xtest_unprocessed
                        if survival_analysis_status:
                            xtest.astype({x: 'float' for x in unpreprocessed_columns})
                            xtrain.astype({x: 'float' for x in unpreprocessed_columns})
                            # task 11997: removed setting the datetime column as the dataframe index, as it is already done before
                        if label_encoder:
                            ytest = label_encoder.transform(ytest)
                    if preprocess_pipe:
                        if self.textFeatures:
                            from text.textProfiler import reset_pretrained_model
                            reset_pretrained_model(preprocess_pipe)  # pickling is not possible for the fasttext model (binary)
                        joblib.dump(preprocess_pipe, Path(deployLocation) / 'model' / 'preprocess_pipe.pkl')
                self.features = topFeatures
                if targetColumn in topFeatures:
                    topFeatures.remove(targetColumn)
                self.topFeatures = topFeatures
                if normalizer != None:
                    normalizer_file_path = os.path.join(deployLocation, 'model', 'normalizer_pipe.sav')
                    normalizer_pickle_file = 'normalizer_pipe.sav'
                    pickle.dump(normalizer, open(normalizer_file_path, 'wb'))
                log.info('Status:-|... AION feature transformation completed')
                dp_mlexecutionTime = time.time() - dp_mlstart
                log.info('-------> COMPUTING: Total Data Profiling Execution Time ' + str(dp_mlexecutionTime))
                log.info('================== Data Profiling completed ==================\n')
            else:
                datacolumns = list(dataFrame.columns)
                if targetFeature in datacolumns:
                    datacolumns.remove(targetFeature)
                if not timeseriesStatus and not anomalyDetectionStatus and not inputDriftStatus and not outputDriftStatus and not imageClassificationStatus and not associationRuleStatus and not objectDetectionStatus and not stateTransitionStatus and not textsummarizationStatus:
                    self.textFeatures, self.vectorizerFeatures, pipe, column_merge_flag, merge_columns = profilerObj.checkForTextClassification(dataFrame)
                    self.topFeatures = datacolumns
                    if pipe is not None:
                        preprocessing_pipe = 'pppipe' + iterName + '_' + iterVersion + '.sav'
                        ppfilename = os.path.join(deployLocation, 'model', 'pppipe' + iterName + '_' + iterVersion + '.sav')
                        pickle.dump(pipe, open(ppfilename, 'wb'))
            status, msg = save_csv_compressed(dataFrame, profiled_data_file)
            if not status:
                log.info('CSV File Error: ' + str(msg))
            if selector_status:
                log.info("\n================== Feature Selector has started ==================")
                log.info("Status:-|... AION feature engineering started")
                fs_mlstart = time.time()
                selectorJson = config_obj.getEionSelectorConfiguration()
                if self.textFeatures:
                    config_obj.updateFeatureSelection(selectorJson, codeConfigure, self.textFeatures)
                    log.info("-------> For vectorizer 'feature selection' is disabled and all the features will be used for training")
                from feature_engineering.featureSelector import featureSelector
                selectorObj = featureSelector()
                dataFrame, targetColumn, self.topFeatures, self.modelSelTopFeatures, self.allFeatures, self.targetType, self.similarGroups, numericContinuousFeatures, discreteFeatures, nonNumericFeatures, categoricalFeatures, pcaModel, bpca_features, apca_features, featureEngineeringSelector = selectorObj.startSelector(dataFrame, selectorJson, self.textFeatures, targetFeature, problem_type)
                if str(pcaModel) != 'None':
                    featureReduction = 'True'
                    status, msg = save_csv(dataFrame, reduction_data_file)
                    if not status:
                        log.info('CSV File Error: ' + str(msg))
                    pcaFileName = os.path.join(deployLocation, 'model', 'pca' + iterName + '_' + iterVersion + '.sav')
                    pcaModel_pickle_file = 'pca' + iterName + '_' + iterVersion + '.sav'
                    pickle.dump(pcaModel, open(pcaFileName, 'wb'))
                    if not xtest.empty:
                        xtest = pd.DataFrame(pcaModel.transform(xtest), columns=apca_features)
                if targetColumn in self.topFeatures:
                    self.topFeatures.remove(targetColumn)
                fs_mlexecutionTime = time.time() - fs_mlstart
                log.info('-------> COMPUTING: Total Feature Selection Execution Time ' + str(fs_mlexecutionTime))
                log.info('================== Feature Selection completed ==================\n')
                log.info("Status:-|... AION feature engineering completed")
            if deeplearner_status or learner_status:
                log.info('Status:-|... AION training started')
                ldp_mlstart = time.time()
                balancingMethod = config_obj.getAIONDataBalancingMethod()
                from learner.machinelearning import machinelearning
                mlobj = machinelearning()
                modelType = problem_type.lower()
                targetColumn = targetFeature
                if modelType == "na":
                    if self.targetType == 'categorical':
                        modelType = 'classification'
                    elif self.targetType == 'continuous':
                        modelType = 'regression'
                    else:
                        modelType = 'clustering'
                datacolumns = list(dataFrame.columns)
                if targetColumn in datacolumns:
                    datacolumns.remove(targetColumn)
                features = datacolumns
                featureData = dataFrame[features]
                if (modelType == 'clustering') or (modelType == 'topicmodelling'):
                    xtrain = featureData
                    ytrain = pd.DataFrame()
                    xtest = featureData
                    ytest = pd.DataFrame()
                elif targetColumn != '':
                    xtrain = dataFrame[features]
                    ytrain = dataFrame[targetColumn]
                else:
                    pass
                categoryCountList = []
                if modelType == 'classification':
                    if mlobj.checkForClassBalancing(ytrain) >= 1:
                        xtrain, ytrain = mlobj.ExecuteClassBalancing(xtrain, ytrain, balancingMethod)
                    valueCount = targetData.value_counts()
                    categoryCountList = valueCount.tolist()
                ldp_mlexecutionTime = time.time() - ldp_mlstart
                log.info('-------> COMPUTING: Total Learner data preparation Execution Time ' + str(ldp_mlexecutionTime))
            if learner_status:
                base_model_score = 0
                log.info('\n================== ML Started ==================')
                log.info('-------> Memory Usage by DataFrame During Learner Status ' + str(dataFrame.memory_usage(deep=True).sum()))
                mlstart = time.time()
                log.info('-------> Target Problem Type:' + self.targetType)
                learner_type = 'ML'
                learnerJson = config_obj.getEionLearnerConfiguration()
                from learner.machinelearning import machinelearning
                mlobj = machinelearning()
                anomalyDetectionStatus = False
                anomalyMethod = config_obj.getEionanomalyModels()
                if modelType.lower() == "anomalydetection" or modelType.lower() == "timeseriesanomalydetection":  # task 11997
                    anomalyDetectionStatus = True
                if anomalyDetectionStatus == True:
                    datacolumns = list(dataFrame.columns)
                    if targetColumn in datacolumns:
                        datacolumns.remove(targetColumn)
                    if datetimeFeature in datacolumns:
                        datacolumns.remove(datetimeFeature)
                    self.features = datacolumns
                    from learner.anomalyDetector import anomalyDetector
                    anomalyDetectorObj = anomalyDetector()
                    model_type = "anomaly_detection"
                    saved_model = model_type + '_' + iterName + '_' + iterVersion + '.sav'
                    if problem_type.lower() == "timeseriesanomalydetection":  # task 11997
                        anomalyconfig = config_obj.getAIONTSAnomalyDetectionConfiguration()
                        modelType = "TimeSeriesAnomalyDetection"
                    else:
                        anomalyconfig = config_obj.getAIONAnomalyDetectionConfiguration()
                    testPercentage = config_obj.getAIONTestTrainPercentage()
                    # Multivariate feature based anomaly detection status from gui (true/false)
                    mv_featurebased_selection = config_obj.getMVFeaturebasedAD()
                    mv_featurebased_ad_status = str(mv_featurebased_selection['uniVariate'])
                    model, estimator, matrix, trainmatrix, score, labelMaps = anomalyDetectorObj.startanomalydetector(dataFrame, targetColumn, labelMaps, inlierLabels, learnerJson, model_type, saved_model, anomalyMethod, deployLocation, predicted_data_file, testPercentage, anomalyconfig, datetimeFeature, mv_featurebased_ad_status)  # Unnati
                    score = 'NA'
                    if self.matrix != '{':
                        self.matrix += ','
                    self.matrix += matrix
                    if self.trainmatrix != '{':
                        self.trainmatrix += ','
                    self.trainmatrix += trainmatrix
                    scoreParam = 'NA'
                    scoredetails = f'{{"Model":"{model}","Score":"{score}"}}'
                    if model_tried != '':
                        model_tried += ','
                    model_tried += scoredetails
                    model = anomalyMethod
                else:
                    log.info('-------> Target Problem Type:' + self.targetType)
                    log.info('-------> Target Model Type:' + modelType)
                    if modelType == 'regression':
                        allowedmatrix = ['mse', 'r2', 'rmse', 'mae']
                        if scoreParam.lower() not in allowedmatrix:
                            scoreParam = 'mse'
                    if modelType == 'classification':
                        allowedmatrix = ['accuracy', 'recall', 'f1_score', 'precision', 'roc_auc']
                        if scoreParam.lower() not in allowedmatrix:
                            scoreParam = 'accuracy'
                    scoreParam = scoreParam.lower()
                    codeConfigure.update_config('scoring_criteria', scoreParam)
                    modelParams, modelList = config_obj.getEionLearnerModelParams(modelType)
                    status, model_type, model, saved_model, matrix, trainmatrix, featureDataShape, model_tried, score, filename, self.features, threshold, pscore, rscore, self.method, loaded_model, xtrain1, ytrain1, xtest1, ytest1, topics, params = mlobj.startLearning(learnerJson, modelType, modelParams, modelList, scoreParam, targetColumn, dataFrame, xtrain, ytrain, xtest, ytest, categoryCountList, self.topFeatures, self.modelSelTopFeatures, self.allFeatures, self.targetType, deployLocation, iterName, iterVersion, trained_data_file, predicted_data_file, labelMaps, 'MB', codeConfigure, featureEngineeringSelector, config_obj.getModelEvaluationConfig(), imageFolderLocation)
                    # Getting model and data for ensemble calculation
                    e_model = loaded_model
                    base_model_score = score
                    if self.matrix != '{':
                        self.matrix += ','
                    if self.trainmatrix != '{':
                        self.trainmatrix += ','
                    self.trainmatrix += trainmatrix
                    self.matrix += matrix
                    mlexecutionTime = time.time() - mlstart
                    log.info('-------> Total ML Execution Time ' + str(mlexecutionTime))
                    log.info('================== ML Completed ==================\n')
            if deeplearner_status:
                learner_type = 'DL'
                log.info('Status:- |... AION DL training started')
                from dlearning.deeplearning import deeplearning
                dlobj = deeplearning()
                from learner.machinelearning import machinelearning
                mlobj = machinelearning()
                log.info('\n================== DL Started ==================')
                dlstart = time.time()
                deeplearnerJson = config_obj.getEionDeepLearnerConfiguration()
                targetColumn = targetFeature
                method = deeplearnerJson['optimizationMethod']
                optimizationHyperParameter = deeplearnerJson['optimizationHyperParameter']
                cvSplit = optimizationHyperParameter['trainTestCVSplit']
                roundLimit = optimizationHyperParameter['roundLimit']
                if 'randomMethod' in optimizationHyperParameter:
                    randomMethod = optimizationHyperParameter['randomMethod']
                else:
                    randomMethod = 'Quantum'
                modelType = problem_type.lower()
                modelParams = deeplearnerJson['modelParams']
                modelParamsFile = deeplearnerJson['modelparamsfile']
                if roundLimit == "":
                    roundLimit = None
                else:
                    roundLimit = int(roundLimit)
                if len(self.modelSelTopFeatures) != 0:
                    dl_features = self.modelSelTopFeatures
                    best_feature_model = 'ModelBased'
                elif len(self.topFeatures) != 0:
                    dl_features = self.topFeatures
                    if featureEngineeringSelector.lower() == 'true':
                        best_feature_model = 'DimensionalityReduction'
                    else:
                        best_feature_model = 'StatisticalBased'
                elif len(self.allFeatures) != 0:
                    dl_features = self.allFeatures
                    best_feature_model = 'AllFeatures'
                else:
                    datacolumns = list(dataFrame.columns)
                    datacolumns.remove(targetColumn)
                    dl_features = datacolumns
                    best_feature_model = 'AllFeatures'
                log.info('-------> Features Used For Modeling: ' + (str(dl_features))[:500])
                if cvSplit == "":
                    cvSplit = None
                else:
                    cvSplit = int(cvSplit)
                xtrain = xtrain[dl_features]
                xtest = xtest[dl_features]
                df_test = xtest.copy()
                df_test['actual'] = ytest
                modelParams, modelList = config_obj.getEionDeepLearnerModelParams(modelType)
                if modelType.lower() == 'classification':
                    scoreParam = dlobj.setScoreParams(scoreParam, modelType)
                    featureDataShape = xtrain.shape
                    model_type = 'Classification'
                    log.info('\n------ Training DL: Classification ----')
                elif modelType.lower() == 'regression':
                    model_type = "Regression"
                    if scoreParam == 'None':
                        scoreParam = None
                    log.info('\n------ Training DL: Regression ----')
                    featureDataShape = xtrain.shape
                model_dl, score_dl, best_model_dl, params_dl, X1, XSNN, model_tried_dl, loss_matrix, optimizer, saved_model_dl, filename_dl, dftrain, df_test, performancematrix, trainingperformancematrix = dlobj.startLearning(model_type, modelList, modelParams, scoreParam, cvSplit, xtrain, ytrain, xtest, ytest, method, randomMethod, roundLimit, labelMaps, df_test, deployLocation, iterName, iterVersion, best_feature_model)
                if model_tried != '':
                    model_tried += ','
                model_tried += model_tried_dl
                bestDL = True
                if learner_status:
                    if score_dl <= score:
                        bestDL = False
                        log.info("\n----------- Machine Learning is Good ---")
                        log.info("-------> Model: " + str(model) + " Score: " + str(score))
                        log.info("---------------------------------------\n")
                    else:
                        os.remove(filename)
                        os.remove(predicted_data_file)
                        log.info("\n------------ Deep Learning is Good---")
                        log.info("-------> Model: " + str(model_dl) + " Score: " + str(score_dl))
                        log.info("---------------------------------------\n")
                if bestDL:
                    model = model_dl
                    score = score_dl
                    best_model = best_model_dl
                    params = params_dl
                    filename = filename_dl
                    status, msg = save_csv(df_test, predicted_data_file)
                    if not status:
                        log.info('CSV File Error: ' + str(msg))
                    saved_model = saved_model_dl
                    self.matrix = '{' + performancematrix
                    self.trainmatrix = '{' + trainingperformancematrix
                    self.features = dl_features
                else:
                    learner_type = 'ML'
                    shutil.rmtree(filename_dl)
                dlexecutionTime = time.time() - dlstart
                log.info('-------> DL Execution Time ' + str(dlexecutionTime))
                log.info('Status:- |... AION DL training completed')
                log.info('================== Deep Completed ==================\n')
            if deeplearner_status or learner_status:
                log.info('Status:-|... AION training completed')
            if stateTransitionStatus:
                log.info('Status:-|... AION State Transition start')
                learner_type = modelType = model_type = 'StateTransition'
                model = 'MarkovClustering'
                scoreParam = 'NA'
                score = 0
                from state_transition.pattern import pattern
                patternobj = pattern(modelFeatures, targetFeature)
                model_tried, probabilityfile, clusteringfile = patternobj.training(dataFrame, outputLocation)
                deploy_status = False
                visualizationstatus = False
                log.info('Status:-|... AION State Transition completed')
            if associationRuleStatus:
                log.info('\n================== Association Rule Started ==================')
                log.info('Status:-|... AION Association Rule start')
                learner_type = 'AR'
                modelType = 'Association Rule'
                model = 'apriori'
                scoreParam = 'NA'
                score = 'NA'
                model_type = modelType
                associationRuleJson = config_obj.getEionAssociationRuleConfiguration()
                modelparams, modelList = config_obj.getEionAssociationRuleModelParams()
                invoiceNoFeature, itemFeature = config_obj.getAssociationRuleFeatures()
                if model in modelparams:
                    modelparam = modelparams[model]
                    log.info('\n-------- Association Rule Start -----')
                    from association_rules.associationrules import associationrules
                    associationrulesobj = associationrules(dataFrame, associationRuleJson, modelparam, invoiceNoFeature, itemFeature)
                    model_tried = associationrulesobj.apply_associationRules(outputLocation)
                    log.info('-------- Association Rule End -----\n')
                log.info('<--------Association Rule Completed----->')
                log.info('Status:-|... AION Association Rule completed')
                deploy_status = False
            if textSimilarityStatus:
                log.info('================ Text Similarity Started ====================')
                log.info('Status:-|... AION Text Similarity started')
                learner_type = 'Text Similarity'
                model_type = 'Text Similarity'
                scoreParam = 'Accuracy'
                modelType = model_type
                firstDocFeature = config_obj.getFirstDocumentFeature()
                secondDocFeature = config_obj.getSecondDocumentFeature()
                textSimilarityCongig = config_obj.getEionTextSimilarityConfig()
                testPercentage = config_obj.getAIONTestTrainPercentage()  # Unnati
                from recommender.text_similarity import eion_similarity_siamese
                objTextSimilarity = eion_similarity_siamese()
                model, score, matrix, trainmatrix, modeltried, saved_model, filename, padding_length, threshold = objTextSimilarity.siamese_model(dataFrame, firstDocFeature, secondDocFeature, targetFeature, textSimilarityCongig, pipe, deployLocation, iterName, iterVersion, testPercentage, predicted_data_file)
                if self.matrix != '{':
                    self.matrix += ','
                self.matrix += matrix
                if model_tried != '':
                    model_tried += ','
                model_tried += modeltried
                if self.trainmatrix != '{':
                    self.trainmatrix += ','
                self.trainmatrix += trainmatrix
                log.info('Status:-|... AION Text Similarity completed')
                log.info('================ Text Similarity Started End====================')
            if timeseriesStatus:
                log.info('================ Time Series Forecasting Started ====================')  # task 11997
                log.info('Status:-|...
AION TimeSeries Forecasting started') #task 11997 modelType = 'TimeSeriesForecasting' #task 11997 model_type = 'TimeSeriesForecasting' #task 11997 learner_type = 'TS' modelName='ARIMA' numericContinuousFeatures = targetFeature.split(",") profilerJson = config_obj.getEionTimeSeriesConfiguration() modelParams,modelList = config_obj.getEionTimeSeriesModelParams() modelName = modelList testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati from time_series.timeseries import timeseries allowedmatrix = ['mse','r2','rmse','mae'] if(scoreParam.lower() not in allowedmatrix): scoreParam = 'rmse' objTS = timeseries(profilerJson,modelParams,modelList,dataFrame,targetFeature,datetimeFeature,modelName,testPercentage,iterName,iterVersion,deployLocation,scoreParam) modelName,model,scoreParam,score,best_model,sfeatures,errormatrix,model_tried,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,scalertransformationFile = objTS.timeseries_learning(trained_data_file,predicted_data_file,deployLocation) xtrain = dataFrame self.matrix += errormatrix log.info("Best model to deploy: \n"+str(model)) ## Below part is for var,arima,fbprophet try: with open(filename, 'rb') as f: loaded_model = pickle.load(f) f.close() except: loaded_model=best_model pass df_l=len(dataFrame) pred_threshold=0.1 max_pred_by_user= round((df_l)*pred_threshold) #prediction for 24 steps or next 24 hours if noofforecasts == -1: noofforecasts = max_pred_by_user no_of_prediction=noofforecasts if (no_of_prediction > max_pred_by_user): log.info("-------> Forecast beyond the threshold.So, Reset to Maximum:" +str(max_pred_by_user)) no_of_prediction=max_pred_by_user noofforecasts = no_of_prediction log.info("-------> Number of Forecast Records: "+str(no_of_prediction)) log.info("\n------ Forecast Prediction Start -------------") if(model.lower() == 'var'): sfeatures.remove(datetimeFeature) self.features = sfeatures originalPredictions=objTS.var_prediction(no_of_prediction) log.info("-------> Predictions") log.info(originalPredictions) predictions=originalPredictions forecast_output = predictions.to_json(orient='records') else: if (model.lower() == 'fbprophet'): self.features = sfeatures if not pred_freq: sessonal_freq = 'H' else: sessonal_freq=pred_freq ts_prophet_future = best_model.make_future_dataframe(periods=no_of_prediction,freq=sessonal_freq,include_history = False) #If additional regressor given by user. 
if (additional_regressors): log.info("------->Prophet additional regressor given by user: "+str(additional_regressors)) ts_prophet_future[additional_regressors] = dataFrame[additional_regressors] ts_prophet_future.reset_index(drop=True) ts_prophet_future=ts_prophet_future.dropna() else: pass train_forecast = best_model.predict(ts_prophet_future) train_forecast = train_forecast.round(2) prophet_forecast_tail=train_forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']] prophet_forecast_tail['ds'] = prophet_forecast_tail['ds'].dt.strftime('%Y-%m-%d %H:%i:%s') log.info("------->Prophet Predictions") log.info(prophet_forecast_tail) forecast_output = prophet_forecast_tail.to_json(orient='records') elif (model.lower() == 'arima'): predictions = loaded_model.predict(n_periods=no_of_prediction) predictions = predictions.round(2) self.features = sfeatures col = targetFeature.split(",") pred = pd.DataFrame(predictions,columns=col) predictionsdf = pred log.info("-------> Predictions") log.info(predictionsdf) forecast_output = predictionsdf.to_json(orient='records') elif (model.lower() == 'encoder_decoder_lstm_mvi_uvo'): log.info(datetimeFeature) log.info(sfeatures) self.features = sfeatures if len(sfeatures) == 1: xt = xtrain[self.features].values else: xt = xtrain[self.features].values with open(scalertransformationFile, 'rb') as f: loaded_scaler_model = pickle.load(f) f.close() xt = xt.astype('float32') xt = loaded_scaler_model.transform(xt) pred_data = xt y_future = [] featuerlen = len(sfeatures) targetColIndx = (xtrain.columns.get_loc(targetFeature)) #in case of lstm multivariate input and univariate out prediction only one sequence can be predicted #consider the last xtrain window as input sequence pdata = pred_data[-lag_order:] pdata = pdata.reshape((1,lag_order, featuerlen)) pred = loaded_model.predict(pdata) pred_1d = pred.ravel() #pred_1d = pred_1d.reshape(len(pred_1d),1) pdata_2d = pdata.ravel().reshape(len(pdata) * lag_order, featuerlen) pdata_2d[:,targetColIndx] = pred_1d pred_2d_inv = loaded_scaler_model.inverse_transform(pdata_2d) predout = pred_2d_inv[:, targetColIndx] predout = predout.reshape(len(pred_1d),1) #y_future.append(predout) col = targetFeature.split(",") pred = pd.DataFrame(index=range(0,len(predout)),columns=col) for i in range(0, len(predout)): pred.iloc[i] = predout[i] predictions = pred log.info("-------> Predictions") log.info(predictions) forecast_output = predictions.to_json(orient='records') elif (model.lower() == 'mlp' or model.lower() == 'lstm'): sfeatures.remove(datetimeFeature) self.features = sfeatures if len(sfeatures) == 1: xt = xtrain[self.features].values else: xt = xtrain[self.features].values with open(scalertransformationFile, 'rb') as f: loaded_scaler_model = pickle.load(f) f.close() xt = xt.astype('float32') xt = loaded_scaler_model.transform(xt) pred_data = xt y_future = [] for i in range(no_of_prediction): pdata = pred_data[-lag_order:] if model.lower() == 'mlp': pdata = pdata.reshape((1,lag_order)) else: pdata = pdata.reshape((1,lag_order, len(sfeatures))) if (len(sfeatures) > 1): pred = loaded_model.predict(pdata) predout = loaded_scaler_model.inverse_transform(pred) y_future.append(predout) pred_data=np.append(pred_data,pred,axis=0) else: pred = loaded_model.predict(pdata) predout = loaded_scaler_model.inverse_transform(pred) y_future.append(predout.flatten()[-1]) pred_data = np.append(pred_data,pred) col = targetFeature.split(",") pred = pd.DataFrame(index=range(0,len(y_future)),columns=col) for i in range(0, len(y_future)): pred.iloc[i] = 
y_future[i] predictions = pred log.info("-------> Predictions") log.info(predictions) forecast_output = predictions.to_json(orient='records') else: pass log.info('Status:-|... AION TimeSeries Forecasting completed') #task 11997 log.info("------ Forecast Prediction End -------------\n") log.info('================ Time Series Forecasting Completed ================\n') #task 11997 if recommenderStatus: log.info('\n================ Recommender Started ================ ') log.info('Status:-|... AION Recommender started') learner_type = 'RecommenderSystem' model_type = 'RecommenderSystem' modelType = model_type model = model_type targetColumn='' datacolumns=list(dataFrame.columns) self.features=datacolumns svd_params = config_obj.getEionRecommenderConfiguration() from recommender.item_rating import recommendersystem recommendersystemObj = recommendersystem(modelFeatures,svd_params) testPercentage = config_obj.getAIONTestTrainPercentage() #Unnati saved_model,rmatrix,score,trainingperformancematrix,model_tried = recommendersystemObj.recommender_model(dataFrame,outputLocation) scoreParam = 'NA' #Task 11190 log.info('Status:-|... AION Recommender completed') log.info('================ Recommender Completed ================\n') if textsummarizationStatus: log.info('\n================ text Summarization Started ================ ') log.info('Status:-|... AION text Summarization started') modelType = 'textsummarization' model_type = 'textsummarization' learner_type = 'Text Summarization' modelName='TextSummarization' from sklearn.preprocessing import LabelEncoder from sklearn.ensemble import RandomForestClassifier from scipy import spatial model = model_type dataLocationTS,deployLocationTS,KeyWordsTS,pathForKeywordFileTS = config_obj.getEionTextSummarizationConfig() #print("dataLocationTS",dataLocationTS) #print("deployLocationTS",deployLocationTS) #print("KeyWordsTS",KeyWordsTS) #print("pathForKeywordFileTS",pathForKeywordFileTS) #PreTrained Model Download starts------------------------- from appbe.dataPath import DATA_DIR preTrainedModellocation = Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization' preTrainedModellocation = Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization' models = {'glove':{50:'glove.6B.50d.w2vformat.txt'}} supported_models = [x for y in models.values() for x in y.values()] modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization' Path(modelsPath).mkdir(parents=True, exist_ok=True) p = Path(modelsPath).glob('**/*') modelsDownloaded = [x.name for x in p if x.name in supported_models] selected_model="glove.6B.50d.w2vformat.txt" if selected_model not in modelsDownloaded: print("Model not in folder, downloading") import urllib.request location = Path(modelsPath) local_file_path = location/f"glove.6B.50d.w2vformat.txt" urllib.request.urlretrieve(f'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/glove.6B.50d.w2vformat.txt', local_file_path) from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6") model = AutoModelForSeq2SeqLM.from_pretrained("sshleifer/distilbart-cnn-12-6") tokenizer.save_pretrained(preTrainedModellocation) model.save_pretrained(preTrainedModellocation) #PreTrained Model Download ends----------------------- deployLocationData=deployLocation+"\\data\\" modelLocation=Path(DATA_DIR)/'PreTrainedModels'/'TextSummarization'/'glove.6B.50d.w2vformat.txt' KeyWordsTS=KeyWordsTS.replace(",", " ") noOfKeyword = len(KeyWordsTS.split()) keywords = KeyWordsTS.split() embeddings 
= {} word = '' with open(modelLocation, 'r', encoding="utf8") as f: header = f.readline() header = header.split(' ') vocab_size = int(header[0]) embed_size = int(header[1]) for i in range(vocab_size): data = f.readline().strip().split(' ') word = data[0] embeddings[word] = [float(x) for x in data[1:]] readData=pd.read_csv(pathForKeywordFileTS,encoding='utf-8',encoding_errors= 'replace') for i in range(noOfKeyword): terms=(sorted(embeddings.keys(), key=lambda word: spatial.distance.euclidean(embeddings[word], embeddings[keywords[i]])) )[1:6] readData = readData.append({'Keyword': keywords[i]}, ignore_index=True) for j in range(len(terms)): readData = readData.append({'Keyword': terms[j]}, ignore_index=True) deployLocationDataKwDbFile=deployLocationData+"keywordDataBase.csv" readData.to_csv(deployLocationDataKwDbFile,encoding='utf-8',index=False) datalocation_path=dataLocationTS path=Path(datalocation_path) fileList=os.listdir(path) textExtraction = pd.DataFrame() textExtraction['Sentences']="" rowIndex=0 for i in range(len(fileList)): fileName=str(datalocation_path)+"\\"+str(fileList[i]) if fileName.endswith(".pdf"): print("\n files ",fileList[i]) from pypdf import PdfReader reader = PdfReader(fileName) number_of_pages = len(reader.pages) text="" textOutputForFile="" OrgTextOutputForFile="" for i in range(number_of_pages) : page = reader.pages[i] text1 = page.extract_text() text=text+text1 import nltk tokens = nltk.sent_tokenize(text) for sentence in tokens: sentence=sentence.replace("\n", " ") if (len(sentence.split()) < 4 ) or (len(str(sentence.split(',')).split()) < 8)or (any(chr.isdigit() for chr in sentence)) : continue textExtraction.at[rowIndex,'Sentences']=str(sentence.strip()) rowIndex=rowIndex+1 if fileName.endswith(".txt"): print("\n txt files",fileList[i]) data=[] with open(fileName, "r",encoding="utf-8") as f: data.append(f.read()) str1 = "" for ele in data: str1 += ele sentences=str1.split(".") count=0 for sentence in sentences: count += 1 textExtraction.at[rowIndex+i,'Sentences']=str(sentence.strip()) rowIndex=rowIndex+1 df=textExtraction #print("textExtraction",textExtraction) deployLocationDataPreProcessData=deployLocationData+"preprocesseddata.csv" save_csv_compressed(deployLocationDataPreProcessData, df, encoding='utf-8') df['Label']=0 kw=pd.read_csv(deployLocationDataKwDbFile,encoding='utf-8',encoding_errors= 'replace') Keyword_list = kw['Keyword'].tolist() for i in df.index: for x in Keyword_list: if (str(df["Sentences"][i])).find(x) != -1: df['Label'][i]=1 break deployLocationDataPostProcessData=deployLocationData+"postprocesseddata.csv" #df.to_csv(deployLocationDataPostProcessData,encoding='utf-8') save_csv_compressed(deployLocationDataPostProcessData, df, encoding='utf-8') labelledData=df train_df=labelledData labelencoder = LabelEncoder() train_df['Sentences'] = labelencoder.fit_transform(train_df['Sentences']) X = train_df.drop('Label',axis=1) y = train_df['Label'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) Classifier = RandomForestClassifier(n_estimators = 10, random_state = 42) modelTs=Classifier.fit(X, y) import pickle deployLocationTS=deployLocation+"\\model\\"+iterName+'_'+iterVersion+'.sav' deployLocationTS2=deployLocation+"\\model\\"+"classificationModel.sav" pickle.dump(modelTs, open(deployLocationTS, 'wb')) pickle.dump(modelTs, open(deployLocationTS2, 'wb')) print("\n trainModel Ends") saved_model = 'textsummarization_'+iterName+'_'+iterVersion log.info('Status:-|... 
AION text summarization completed') model = learner_type log.info('================ text summarization Completed ================\n') if survival_analysis_status: sa_method = config_obj.getEionanomalyModels() labeldict = {} log.info('\n================ SurvivalAnalysis Started ================ ') log.info('Status:-|... AION SurvivalAnalysis started') log.info('\n================ SurvivalAnalysis DataFrame ================ ') log.info(dataFrame) from survival import survival_analysis from learner.machinelearning import machinelearning sa_obj = survival_analysis.SurvivalAnalysis(dataFrame, preprocess_pipe, sa_method, targetFeature, datetimeFeature, filter_expression, profilerObj.train_features_type) if sa_obj != None: predict_json = sa_obj.learn() if sa_method.lower() in ['kaplanmeierfitter','kaplanmeier','kaplan-meier','kaplan meier','kaplan','km','kmf']: predicted = sa_obj.models[0].predict(dataFrame[datetimeFeature]) status, msg = save_csv(predicted,predicted_data_file) if not status: log.info('CSV File Error: ' + str(msg)) self.features = [datetimeFeature] elif sa_method.lower() in ['coxphfitter','coxregression','cox-regression','cox regression','coxproportionalhazard','coxph','cox','cph']: predicted = sa_obj.models[0].predict_cumulative_hazard(dataFrame) datacolumns = list(dataFrame.columns) targetColumn = targetFeature if targetColumn in datacolumns: datacolumns.remove(targetColumn) self.features = datacolumns score = sa_obj.score scoreParam = 'Concordance_Index' status,msg = save_csv(predicted,predicted_data_file) if not status: log.info('CSV File Error: ' + str(msg)) model = sa_method modelType = "SurvivalAnalysis" model_type = "SurvivalAnalysis" modelName = sa_method i = 1 for mdl in sa_obj.models: saved_model = "%s_%s_%s_%d.sav"%(model_type,sa_method,iterVersion,i) pickle.dump(mdl, open(os.path.join(deployLocation,'model',saved_model), 'wb')), i+=1 p = 1 for plot in sa_obj.plots: img_name = "%s_%d.png"%(sa_method,p) img_location = os.path.join(imageFolderLocation,img_name) plot.savefig(img_location,bbox_inches='tight') sa_images.append(img_location) p+=1 log.info('Status:-|... 
AION SurvivalAnalysis completed') log.info('\n================ SurvivalAnalysis Completed ================ ') if visualizationstatus: visualizationJson = config_obj.getEionVisualizationConfiguration() log.info('\n================== Visualization Recommendation Started ==================') visualizer_mlstart = time.time() from visualization.visualization import Visualization visualizationObj = Visualization(iterName,iterVersion,dataFrame,visualizationJson,datetimeFeature,deployLocation,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,self.features,targetFeature,model_type,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,self.vectorizerFeatures,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,self.emptyFeatures,self.dfrows,self.dfcols,saved_model,scoreParam,learner_type,model,featureReduction,reduction_data_file) visualizationObj.visualizationrecommandsystem() visualizer_mlexecutionTime=time.time() - visualizer_mlstart log.info('-------> COMPUTING: Total Visualizer Execution Time '+str(visualizer_mlexecutionTime)) log.info('================== Visualization Recommendation Started ==================\n') if similarityIdentificationStatus or contextualSearchStatus: datacolumns=list(dataFrame.columns) features = modelFeatures.split(",") if indexFeature != '' and indexFeature != 'NA': iFeature = indexFeature.split(",") for ifea in iFeature: if ifea not in features: features.append(ifea) for x in features: dataFrame[x] = similaritydf[x] #get vectordb(chromadb) status selected if similarityIdentificationStatus: learner_type = 'similarityIdentification' else: learner_type = 'contextualSearch' vecDBCosSearchStatus = config_obj.getVectorDBCosSearchStatus(learner_type) if vecDBCosSearchStatus: status, msg = save_chromadb(dataFrame, config_obj, trained_data_file, modelFeatures) if not status: log.info('Vector DB File Error: '+str(msg)) else: status, msg = save_csv(dataFrame,trained_data_file) if not status: log.info('CSV File Error: '+str(msg)) self.features = datacolumns model_type = config_obj.getAlgoName(problem_type) model = model_type #bug 12833 model_tried = '{"Model":"'+model_type+'","FeatureEngineering":"NA","Score":"NA","ModelUncertainty":"NA"}' modelType = learner_type saved_model = learner_type score = 'NA' if deploy_status: if str(model) != 'None': log.info('\n================== Deployment Started ==================') log.info('Status:-|... AION Creating Prediction Service Start') deployer_mlstart = time.time() deployJson = config_obj.getEionDeployerConfiguration() deploy_name = iterName+'_'+iterVersion from prediction_package.model_deploy import DeploymentManager if textsummarizationStatus : deploy = DeploymentManager() deploy.deployTSum(deployLocation,preTrainedModellocation) codeConfigure.save_config(deployLocation) deployer_mlexecutionTime=time.time() - deployer_mlstart log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime)) log.info('Status:-|... 
AION Deployer completed') log.info('================== Deployment Completed ==================') else: deploy = DeploymentManager() deploy.deploy_model(deploy_name,deployJson,learner_type,model_type,model,scoreParam,saved_model,deployLocation,self.features,self.profilerAction,dataLocation,labelMaps,column_merge_flag,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,preprocessing_pipe,numericToLabel_json,threshold,loss_matrix,optimizer,firstDocFeature,secondDocFeature,padding_length,trained_data_file,dictDiffCount,targetFeature,normalizer_pickle_file,normFeatures,pcaModel_pickle_file,bpca_features,apca_features,self.method,deployFolder,iterName,iterVersion,self.wordToNumericFeatures,imageconfig,sessonal_freq,additional_regressors,grouperbyjson,rowfilterexpression,xtrain,profiled_data_file,conversion_method,modelFeatures,indexFeature,lag_order,scalertransformationFile,noofforecasts,preprocess_pipe,preprocess_out_columns, label_encoder,datetimeFeature,usecaseLocation,deploy_config) codeConfigure.update_config('deploy_path',os.path.join(deployLocation,'publish')) codeConfigure.save_config(deployLocation) deployer_mlexecutionTime=time.time() - deployer_mlstart log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime)) log.info('Status:-|... AION Creating Prediction Service completed') log.info('================== Deployment Completed ==================') if not outputDriftStatus and not inputDriftStatus: from transformations.dataProfiler import set_features self.features = set_features(self.features,profilerObj) self.matrix += '}' self.trainmatrix += '}' print(model_tried) model_tried = eval('['+model_tried+']') matrix = eval(self.matrix) trainmatrix = eval(self.trainmatrix) deployPath = deployLocation.replace(os.sep, '/') if survival_analysis_status: output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"survivalProbability":json.loads(predict_json),"featuresused":str(self.features),"targetFeature":str(targetColumn),"EvaluatedModels":model_tried,"imageLocation":str(sa_images),"LogFile":logFileName}} elif not timeseriesStatus: try: json.dumps(params) output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":params,"EvaluatedModels":model_tried,"LogFile":logFileName}} except: output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":"","EvaluatedModels":model_tried,"LogFile":logFileName}} else: if config_obj.summarize: modelType = 'Summarization' output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"EvaluatedModels":model_tried,'forecasts':json.loads(forecast_output),"LogFile":logFileName}} if bool(topics) == True: output_json['topics'] = topics with open(outputjsonFile, 'w',encoding='utf-8') as f: json.dump(output_json, f) f.close() output_json = json.dumps(output_json) 
            log.info('\n------------- Summary ------------')
            log.info('------->No of rows & columns in data:('+str(self.dfrows)+','+str(self.dfcols)+')')
            log.info('------->No of missing Features :'+str(len(self.mFeatures)))
            log.info('------->Missing Features:'+str(self.mFeatures))
            log.info('------->Text Features:'+str(self.textFeatures))
            log.info('------->No of Nonnumeric Features :'+str(len(self.nonNumericFeatures)))
            log.info('------->Non-Numeric Features :' +str(self.nonNumericFeatures))
            if threshold == -1:
                log.info('------->Threshold: NA')
            else:
                log.info('------->Threshold: '+str(threshold))
            log.info('------->Label Maps of Target Feature for classification :'+str(labelMaps))
            for i in range(0,len(self.similarGroups)):
                log.info('------->Similar Groups '+str(i+1)+' '+str(self.similarGroups[i]))
            if((learner_type != 'TS') & (learner_type != 'AR')):
                log.info('------->No of columns and rows used for Modeling :'+str(featureDataShape))
            log.info('------->Features Used for Modeling:'+str(self.features))
            log.info('------->Target Feature: '+str(targetColumn))
            log.info('------->Best Model Score :'+str(score))
            log.info('------->Best Parameters:'+str(params))
            log.info('------->Type of Model :'+str(modelType))
            log.info('------->Best Model :'+str(model))
            log.info('------------- Summary ------------\n')
            log.info('Status:-|... AION Model Training Successfully Done')
        except Exception as inst:
            log.info('server code execution failed !....'+str(inst))
            log.error(inst, exc_info = True)
            output_json = {"status":"FAIL","message":str(inst).strip('"'),"LogFile":logFileName}
            output_json = json.dumps(output_json)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
        executionTime = timeit.default_timer() - startTime
        log.info('\nTotal execution time(sec) :'+str(executionTime))
        log.info('\n------------- Output JSON ------------')
        log.info('aion_learner_status:'+str(output_json))
        log.info('------------- Output JSON ------------\n')
        for hdlr in log.handlers[:]:  # remove the existing file handlers
            if isinstance(hdlr,logging.FileHandler):
                hdlr.close()
                log.removeHandler(hdlr)
        return output_json

    def split_into_train_test_data(self,featureData,targetData,testPercentage,log,modelType='classification'): #Unnati
        log.info('\n-------------- Test Train Split ----------------')
        if testPercentage == 0 or testPercentage == 100: #Unnati
            xtrain=featureData
            ytrain=targetData
            xtest=pd.DataFrame()
            ytest=pd.DataFrame()
        else:
            testSize= testPercentage/100 #Unnati
            if modelType == 'regression':
                log.info('-------> Split Type: Random Split')
                xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True,random_state=42)
            else:
                try:
                    log.info('-------> Split Type: Stratify Split')
                    xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,stratify=targetData,test_size=testSize,random_state=42)
                except Exception as ValueError:
                    count_unique = targetData.value_counts()
                    feature_with_single_count = count_unique[count_unique == 1].index.tolist()
                    error = f"The least populated class in {feature_with_single_count} has only 1 member, which is too few. The minimum number of groups for any class cannot be less than 2"
                    raise Exception(error) from ValueError
                except:
                    log.info('-------> Split Type: Random Split')
                    xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True,random_state=42)
        log.info('Status:- !... Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test') #Unnati
        log.info('-------> Train Data Shape: '+str(xtrain.shape)+' ---------->')
        log.info('-------> Test Data Shape: '+str(xtest.shape)+' ---------->')
        log.info('-------------- Test Train Split End ----------------\n')
        return(xtrain,ytrain,xtest,ytest)

def aion_train_model(arg):
    warnings.filterwarnings('ignore')
    config_path = Path(arg)
    with open(config_path, 'r') as f:
        config = json.load(f)
    log = set_log_handler(config['basic'])
    log.info('************* Version - v'+AION_VERSION+' *************** \n')
    msg = '-------> Execution Start Time: '+ datetime.datetime.now(timezone("Asia/Kolkata")).strftime('%Y-%m-%d %H:%M:%S' + ' IST')
    log.info(msg)
    try:
        config_validate(arg)
        valid, msg = pushRecordForTraining()
        if valid:
            serverObj = server()
            configObj = AionConfigManager()
            codeConfigure = code_configure()
            codeConfigure.create_config(config)
            readConfistatus,msg = configObj.readConfigurationFile(config)
            if(readConfistatus == False):
                raise ValueError(msg)
            output = serverObj.startScriptExecution(configObj, codeConfigure, log)
        else:
            output = {"status":"LicenseVerificationFailed","message":str(msg).strip('"')}
            output = json.dumps(output)
        print(f"\naion_learner_status:{output}\n")
        log.info(f"\naion_learner_status:{output}\n")
    except Exception as inst:
        output = {"status":"FAIL","message":str(inst).strip('"')}
        output = json.dumps(output)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
        print(f"\naion_learner_status:{output}\n")
        log.info(f"\naion_learner_status:{output}\n")
    return output

if __name__ == "__main__":
    aion_train_model(sys.argv[1])
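# --- Illustrative usage sketch (added note; not part of the original module) ---
# aion_train_model() takes the path of an AION training-configuration JSON and
# returns the learner status as a JSON string. A minimal programmatic call,
# assuming a hypothetical config file 'train_config.json', could look like:
def _example_train_call():
    import json
    output = aion_train_model('train_config.json')  # hypothetical config path
    return json.loads(output).get('status')  # 'SUCCESS' or 'FAIL'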
aion_uncertainties.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import joblib
import time
import pandas as pd
import numpy as np
import argparse
import json
import os
import pathlib
from pathlib import Path
from uncertainties.uq_main import aionUQ
from datetime import datetime
from os.path import expanduser
import platform
import logging

class run_uq:
    def __init__(self,modelfeatures,modelFile,csvFile,target):
        self.modelfeatures=modelfeatures
        self.modelFile=modelFile
        self.csvFile=csvFile
        self.target=target

    ##UQ classification fn
    def getUQclassification(self,model,ProblemName,Params):
        df = pd.read_csv(self.csvFile)
        # object_cols = [col for col, col_type in df.dtypes.iteritems() if col_type == 'object'] -- Fix for python 3.8.11 update (in 2.9.0.8)
        object_cols = [col for col, col_type in zip(df.columns,df.dtypes) if col_type == 'object']
        df = df.drop(object_cols, axis=1)
        df = df.dropna(axis=1)
        df = df.reset_index(drop=True)
        modelfeatures = self.modelfeatures
        #tar = args.target
        # target = df[tar]
        y=df[self.target].values
        y = y.flatten()
        X = df.drop(self.target, axis=1)
        try:
            uqObj=aionUQ(df,X,y,ProblemName,Params,model,modelfeatures,self.target)
            accuracy,uq_ece,output_jsonobject=uqObj.uqMain_BBMClassification()
        except Exception as e:
            print("uq error",e)
            raise  # re-raise so the caller records the failure instead of hitting a NameError on the return below
        # print("UQ Classification: \n",output_jsonobject)
        # print(accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per)
        #print(output_jsonobject)
        return accuracy,uq_ece,output_jsonobject

    ##UQ regression fn
    def getUQregression(self,model,ProblemName,Params):
        df = pd.read_csv(self.csvFile)
        modelfeatures = self.modelfeatures
        dfp = df[modelfeatures]
        tar = self.target
        target = df[tar]
        uqObj=aionUQ(df,dfp,target,ProblemName,Params,model,modelfeatures,tar)
        total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression()
        return total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject

    def uqMain(self,model):
        #print("inside uq main.\n")
        reg_status=""
        class_status=""
        algorithm_status=""
        try:
            model=model
            if Path(self.modelFile).is_file():
                ProblemName = model.__class__.__name__
                if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecisionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighborsClassifier','GradientBoostingClassifier']:
                    Problemtype = 'Classification'
                elif ProblemName in ['LinearRegression','Lasso','Ridge','DecisionTreeRegressor','RandomForestRegressor']:
                    Problemtype = 'Regression'
                else:
                    Problemtype = "None"
            if Problemtype.lower() == 'classification':
                try:
                    Params = model.get_params()
                    accuracy,uq_ece,output = self.getUQclassification(model,ProblemName,Params)
                    class_status="SUCCESS"
                    #print(output)
                except Exception as e:
                    print(e)
                    class_status="FAILED"
                    output = {'Problem':'None','msg':str(e)}
                    output = json.dumps(output)
            elif Problemtype.lower() == 'regression':
                try:
                    Params = model.get_params()
                    total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,output = self.getUQregression(model,ProblemName,Params)
                    #print(uq_jsonobject)
                    reg_status="SUCCESS"
                except Exception as e:
                    output = {'Problem':'None','msg':str(e)}
                    output = json.dumps(output)
                    reg_status="FAILED"
            else:
                try:
                    output={}
                    output['Problem']="None"
                    output['msg']="Uncertainty Quantification not supported for this algorithm."
                    output = json.dumps(output)
                    algorithm_status="FAILED"
                except:
                    algorithm_status="FAILED"
        except Exception as e:
            print(e)
            reg_status="FAILED"
            class_status="FAILED"
            algorithm_status="FAILED"
            output = {'Problem':'None','msg':str(e)}
            output = json.dumps(output)
        return class_status,reg_status,algorithm_status,output

def aion_uq(modelFile,dataFile,features,targetfeatures):
    try:
        from appbe.dataPath import DEPLOY_LOCATION
        uqLogLocation = os.path.join(DEPLOY_LOCATION,'logs')
        try:
            os.makedirs(uqLogLocation)
        except OSError as e:
            if (os.path.exists(uqLogLocation)):
                pass
            else:
                raise OSError('uqLogLocation error.')
        filename_uq = 'uqlog_'+str(int(time.time()))
        filename_uq=filename_uq+'.log'
        filepath = os.path.join(uqLogLocation, filename_uq)
        print(filepath)
        logging.basicConfig(filename=filepath, format='%(message)s',filemode='w')
        log = logging.getLogger('aionUQ')
        log.setLevel(logging.INFO)
        log.info('************* Version - v1.7.0 *************** \n')
        if isinstance(features, list):
            modelfeatures = features
        else:
            if ',' in features:
                modelfeatures = [x.strip() for x in features.split(',')]
            else:
                modelfeatures = features.split(',')
        model = joblib.load(modelFile)
        uqobj = run_uq(modelfeatures,modelFile,dataFile,targetfeatures)
        class_status,reg_status,algorithm_status,output=uqobj.uqMain(model)
        if (class_status.lower() == 'failed'):
            log.info('uq classification failed.\n')
        elif (class_status.lower() == 'success'):
            log.info('uq classification success.\n')
        else:
            log.info('uq classification not used.\n')
        if (reg_status.lower() == 'failed'):
            log.info('uq regression failed.\n')
        elif (reg_status.lower() == 'success'):
            log.info('uq regression success.\n')
        else:
            log.info('uq regression not used.\n')
        if (algorithm_status.lower() == 'failed'):
            log.info('Problem type issue: UQ only supports classification and regression. The selected algorithm may not be supported by Uncertainty Quantification currently.\n')
    except Exception as e:
        log.info('uq test failed.\n'+str(e))
        #print(e)
        output = {'Problem':'None','msg':str(e)}
        output = json.dumps(output)
    return(output)

#Sagemaker main fn call
if __name__=='__main__':
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument('savFile')
        parser.add_argument('csvFile')
        parser.add_argument('features')
        parser.add_argument('target')
        args = parser.parse_args()
        home = expanduser("~")
        if platform.system() == 'Windows':
            uqLogLocation = os.path.join(home,'AppData','Local','HCLT','AION','uqLogs')
        else:
            uqLogLocation = os.path.join(home,'HCLT','AION','uqLogs')
        try:
            os.makedirs(uqLogLocation)
        except OSError as e:
            if (os.path.exists(uqLogLocation)):
                pass
            else:
                raise OSError('uqLogLocation error.')
        # self.sagemakerLogLocation=str(sagemakerLogLocation)
        filename_uq = 'uqlog_'+str(int(time.time()))
        filename_uq=filename_uq+'.log'
        # filename = 'mlopsLog_'+Time()
        filepath = os.path.join(uqLogLocation, filename_uq)
        logging.basicConfig(filename=filepath, format='%(message)s',filemode='w')
        log = logging.getLogger('aionUQ')
        log.setLevel(logging.DEBUG)
        if ',' in args.features:
            args.features = [x.strip() for x in args.features.split(',')]
        else:
            args.features = args.features.split(',')
        modelFile = args.savFile
        modelfeatures = args.features
        csvFile = args.csvFile
        target=args.target
        model = joblib.load(args.savFile)
        ##Main uq function call
        uqobj = run_uq(modelfeatures,modelFile,csvFile,target)
        class_status,reg_status,algorithm_status,output=uqobj.uqMain(model)
        if (class_status.lower() == 'failed'):
            log.info('uq classification failed.\n')
        elif (class_status.lower() == 'success'):
            log.info('uq classification success.\n')
        else:
            log.info('uq classification not used.\n')
        if (reg_status.lower() == 'failed'):
            log.info('uq regression failed.\n')
        elif (reg_status.lower() == 'success'):
            log.info('uq regression success.\n')
        else:
            log.info('uq regression not used.\n')
        if (algorithm_status.lower() == 'failed'):
            msg = 'Uncertainty Quantification not supported for this algorithm'
            log.info('Algorithm not supported by Uncertainty Quantification.\n')
            output = {'Problem':'None','msg':str(msg)}
            output = json.dumps(output)
    except Exception as e:
        log.info('uq test failed.\n'+str(e))
        output = {'Problem':'None','msg':str(e)}
        output = json.dumps(output)
        #print(e)
    print(output)
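# --- Illustrative usage sketch (added note; not part of the original module) ---
# aion_uq() wires everything above together: it loads the pickled model, runs
# run_uq.uqMain() and returns the UQ result as a JSON string. The file names
# and feature names below are hypothetical.
def _example_uq_call():
    return aion_uq('model.sav', 'data.csv', ['feature1', 'feature2'], 'target')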
aion_telemetry.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import requests
import json
import os
from datetime import datetime
import socket
import getmac

def telemetry_data(operation,Usecase,data):
    now = datetime.now()
    ID = datetime.timestamp(now)
    record_date = now.strftime("%y-%m-%d %H:%M:%S")
    try:
        user = os.getlogin()
    except:
        user = 'NA'
    computername = socket.getfqdn()
    macaddress = getmac.get_mac_address()
    item = {}
    item['ID'] = str(int(ID))
    item['record_date'] = record_date
    item['UseCase'] = Usecase
    item['user'] = str(user)
    item['operation'] = operation
    item['remarks'] = data
    item['hostname'] = computername
    item['macaddress'] = macaddress
    url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry'
    record = {}
    record['TableName'] = 'AION_OPERATION'
    record['Item'] = item
    record = json.dumps(record)
    try:
        response = requests.post(url, data=record,headers={"x-api-key":"Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK","Content-Type":"application/json",})
        check_telemetry_file()
    except Exception as inst:
        filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt')
        f=open(filename, "a+")
        f.write(record+'\n')
        f.close()

def check_telemetry_file():
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt')
    if(os.path.isfile(file_path)):
        f = open(file_path, 'r')
        file_content = f.read()
        f.close()
        matched_lines = file_content.split('\n')
        write_lines = []
        url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry'
        for record in matched_lines:
            try:
                response = requests.post(url, data=record,headers={"x-api-key":"Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK","Content-Type":"application/json",})
            except:
                write_lines.append(record)
        f = open(file_path, "a")
        f.seek(0)
        f.truncate()
        for record in write_lines:
            f.write(record+'\n')
        f.close()
        return True
    else:
        return True
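# --- Illustrative usage sketch (added note; not part of the original module) ---
# telemetry_data() posts one usage record to the AION telemetry endpoint and,
# if the POST fails, buffers it in a local telemetry.txt that
# check_telemetry_file() retries on a later successful call. The argument
# values below are hypothetical free-form strings.
def _example_telemetry_call():
    telemetry_data('Training', 'MyUseCase_1', 'training completed')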
aion_publish.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import shutil
import subprocess
import sys
import glob
import json
import time  # needed by service_stop()/service_start() below

def publish(data):
    if os.path.splitext(data)[1] == ".json":
        with open(data,'r',encoding='utf-8') as f:
            jsonData = json.load(f)
    else:
        jsonData = json.loads(data)
    model = jsonData['modelName']
    version = jsonData['modelVersion']
    deployFolder = jsonData['deployLocation']
    model = model.replace(" ", "_")
    deployedPath = os.path.join(deployFolder,model+'_'+version)
    deployedPath = os.path.join(deployedPath,'WHEELfile')
    whlfilename='na'
    if os.path.isdir(deployedPath):
        for file in os.listdir(deployedPath):
            if file.endswith(".whl"):
                whlfilename = os.path.join(deployedPath,file)
    if whlfilename != 'na':
        subprocess.check_call([sys.executable, "-m", "pip", "uninstall","-y",model])
        subprocess.check_call([sys.executable, "-m", "pip", "install", whlfilename])
        status,pid,ip,port = check_service_running(jsonData['modelName'],jsonData['serviceFolder'])
        if status == 'Running':
            service_stop(json.dumps(jsonData))
            service_start(json.dumps(jsonData))
        output_json = {'status':"SUCCESS"}
        output_json = json.dumps(output_json)
    else:
        output_json = {'status':'Error','Msg':'Installation Package not Found'}
        output_json = json.dumps(output_json)
    return(output_json)

def check_service_running(model,serviceFolder):
    model = model.replace(" ", "_")
    filename = model+'_service.py'
    modelservicefile = os.path.join(serviceFolder,filename)
    status = 'File Not Exist'
    ip = ''
    port = ''
    pid = ''
    if os.path.exists(modelservicefile):
        status = 'File Exist'
        import psutil
        for proc in psutil.process_iter():
            pinfo = proc.as_dict(attrs=['pid', 'name', 'cmdline','connections'])
            if 'python' in pinfo['name']:
                if filename in pinfo['cmdline'][1]:
                    status = 'Running'
                    pid = pinfo['pid']
                    for x in pinfo['connections']:
                        ip = x.laddr.ip
                        port = x.laddr.port
    return(status,pid,ip,port)

def service_stop(data):
    if os.path.splitext(data)[1] == ".json":
        with open(data,'r',encoding='utf-8') as f:
            jsonData = json.load(f)
    else:
        jsonData = json.loads(data)
    status,pid,ip,port = check_service_running(jsonData['modelName'],jsonData['serviceFolder'])
    if status == 'Running':
        import psutil
        p = psutil.Process(int(pid))
        p.terminate()
        time.sleep(2)
    output_json = {'status':'SUCCESS'}
    output_json = json.dumps(output_json)
    return(output_json)

def service_start(data):
    if os.path.splitext(data)[1] == ".json":
        with open(data,'r',encoding='utf-8') as f:
            jsonData = json.load(f)
    else:
        jsonData = json.loads(data)
    model = jsonData['modelName']
    version = jsonData['modelVersion']
    ip = jsonData['ip']
    port = jsonData['port']
    deployFolder = jsonData['deployLocation']
    serviceFolder = jsonData['serviceFolder']
    model = model.replace(" ", "_")
    deployLocation = os.path.join(deployFolder,model+'_'+version)
    org_service_file = os.path.abspath(os.path.join(os.path.dirname(__file__),'model_service.py'))
    filename = model+'_service.py'
    modelservicefile = os.path.join(serviceFolder,filename)
    status = 'File Not Exist'
    if os.path.exists(modelservicefile):
        status = 'File Exist'
        # decode() so the tasklist output is compared as str, not bytes
        r = ([line.split() for line in subprocess.check_output("tasklist").decode().splitlines()])
        for i in range(len(r)):
            if filename in r[i]:
                status = 'Running'
    if status == 'File Not Exist':
        shutil.copy(org_service_file,modelservicefile)
        with open(modelservicefile, 'r+') as file:
            content = file.read()
            file.seek(0, 0)
            line = 'from '+model+' import aion_performance'
            file.write(line+"\n")
            line = 'from '+model+' import aion_drift'
            file.write(line+"\n")
            line = 'from '+model+' import featureslist'
            file.write(line+"\n")
            line = 'from '+model+' import aion_prediction'
            file.write(line+"\n")
            file.write(content)
            file.close()
        status = 'File Exist'
    if status == 'File Exist':
        status,pid,ipold,portold = check_service_running(jsonData['modelName'],jsonData['serviceFolder'])
        if status != 'Running':
            command = "python "+modelservicefile+' '+str(port)+' '+str(ip)
            os.system('start cmd /c "'+command+'"')
            time.sleep(2)
            status = 'Running'
    output_json = {'status':'SUCCESS','Msg':status}
    output_json = json.dumps(output_json)
    return(output_json)

if __name__ == "__main__":
    publish(sys.argv[1])
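# --- Illustrative usage sketch (added note; not part of the original module) ---
# publish() accepts either a JSON file path or a JSON string; it installs the
# model's wheel package and (re)starts its prediction service. The keys mirror
# the ones read above; the values here are hypothetical.
def _example_publish_call():
    payload = {
        'modelName': 'MyUseCase',
        'modelVersion': '1',
        'deployLocation': r'C:\AION\target',
        'serviceFolder': r'C:\AION\services',
        'ip': '127.0.0.1',
        'port': '8091',
    }
    return publish(json.dumps(payload))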
aion_text_summarizer.py
import json
import logging
import os
import shutil
import time
import sys
from sys import platform
from distutils.util import strtobool
from config_manager.pipeline_config import AionConfigManager
from summarizer import Summarizer

# Base class for the EION configuration manager, which reads the needed params
# from eion.json, initializes the parameter list, reads the respective params,
# stores them in variables and returns them to the caller function or external modules.
class AionTextManager:
    def __init__(self):
        self.log = logging.getLogger('eion')
        self.data = ''
        self.problemType = ''
        self.basic = []
        self.advance=[]

    def readTextfile(self,dataPath):
        #dataPath=self.[baisc][]
        file = open(dataPath, "r")
        data = file.read()
        return data
        #print(data)

    def generateSummary(self,data,algo,stype):
        bert_model = Summarizer()
        if stype == "large":
            bert_summary = ''.join(bert_model(data, min_length=300))
            return(bert_summary)
        elif stype == "medium":
            bert_summary = ''.join(bert_model(data, min_length=150))
            return(bert_summary)
        elif stype == "small":
            bert_summary = ''.join(bert_model(data, min_length=60))
            return(bert_summary)

def aion_textsummary(arg):
    Obj = AionTextManager()
    configObj = AionConfigManager()
    readConfistatus,msg = configObj.readConfigurationFile(arg)
    dataPath = configObj.getTextlocation()
    text_data = Obj.readTextfile(dataPath)
    getAlgo, getMethod = configObj.getTextSummarize()
    summarize = Obj.generateSummary(text_data, getAlgo, getMethod)
    output = {'status':'Success','summary':summarize}
    output_json = json.dumps(output)
    return(output_json)

if __name__ == "__main__":
    aion_textsummary(sys.argv[1])
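# --- Illustrative usage sketch (added note; not part of the original module) ---
# aion_textsummary() is driven entirely by the configuration file: the text
# location, algorithm and summary size ('small'/'medium'/'large') are all read
# from it. The config path below is hypothetical.
def _example_summarize_call():
    return aion_textsummary('summarizer_config.json')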
__init__.py
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
aion_service.py
#from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer from http.server import BaseHTTPRequestHandler,HTTPServer #from SocketServer import ThreadingMixIn from socketserver import ThreadingMixIn from functools import partial from http.server import SimpleHTTPRequestHandler, test import base64 from appbe.dataPath import DEPLOY_LOCATION ''' from augustus.core.ModelLoader import ModelLoader from augustus.strict import modelLoader ''' import pandas as pd import os,sys from os.path import expanduser import platform import numpy as np import configparser import threading import subprocess import argparse from functools import partial import re import cgi from datetime import datetime import json import sys from datetime import datetime user_records = {} class LocalModelData(object): models = {} class HTTPRequestHandler(BaseHTTPRequestHandler): def __init__(self, *args, **kwargs): username = kwargs.pop("username") password = kwargs.pop("password") self._auth = base64.b64encode(f"{username}:{password}".encode()).decode() super().__init__(*args) def do_HEAD(self): self.send_response(200) self.send_header("Content-type", "text/html") self.end_headers() def do_AUTHHEAD(self): self.send_response(401) self.send_header("WWW-Authenticate", 'Basic realm="Test"') self.send_header("Content-type", "text/html") self.end_headers() def do_POST(self): print("PYTHON ######## REQUEST ####### STARTED") if None != re.search('/AION/', self.path): ctype, pdict = cgi.parse_header(self.headers.get('content-type')) if ctype == 'application/json': if self.headers.get("Authorization") == None: self.do_AUTHHEAD() resp = "Authentication Failed: Auth Header Not Present" resp=resp.encode() self.wfile.write(resp) elif self.headers.get("Authorization") == "Basic " + self._auth: length = int(self.headers.get('content-length')) #data = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1) data = self.rfile.read(length) #print(data) #keyList = list(data.keys()) #print(keyList[0]) model = self.path.split('/')[-2] operation = self.path.split('/')[-1] home = expanduser("~") #data = json.loads(data) dataStr = data model_path = os.path.join(DEPLOY_LOCATION,model) isdir = os.path.isdir(model_path) if isdir: if operation.lower() == 'predict': predict_path = os.path.join(model_path,'aion_predict.py') outputStr = subprocess.check_output([sys.executable,predict_path,dataStr]) outputStr = outputStr.decode('utf-8') outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1) outputStr = outputStr.strip() resp = outputStr elif operation.lower() == 'spredict': try: predict_path = os.path.join(model_path,'aion_spredict.py') print(predict_path) outputStr = subprocess.check_output([sys.executable,predict_path,dataStr]) outputStr = outputStr.decode('utf-8') outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1) outputStr = outputStr.strip() resp = outputStr except Exception as e: print(e) elif operation.lower() == 'features': predict_path = os.path.join(model_path,'featureslist.py') outputStr = subprocess.check_output([sys.executable,predict_path,dataStr]) outputStr = outputStr.decode('utf-8') outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1) outputStr = outputStr.strip() resp = outputStr elif operation.lower() == 'explain': predict_path = os.path.join(model_path,'explainable_ai.py') outputStr = subprocess.check_output([sys.executable,predict_path,'local',dataStr]) outputStr = outputStr.decode('utf-8') outputStr = 
re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1)
                        outputStr = outputStr.strip()
                    elif operation.lower() == 'monitoring':
                        predict_path = os.path.join(model_path,'aion_ipdrift.py')
                        outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
                        outputStr = outputStr.decode('utf-8')
                        outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1)
                        outputStr = outputStr.strip()
                    elif operation.lower() == 'performance':
                        predict_path = os.path.join(model_path,'aion_opdrift.py')
                        outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
                        outputStr = outputStr.decode('utf-8')
                        outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1)
                        outputStr = outputStr.strip()
                    elif operation.lower() == 'pattern_anomaly_predict':
                        data = json.loads(data)
                        anomaly = False
                        remarks = ''
                        clusterid = -1
                        # chain average of transition probabilities; starts above any
                        # threshold so the pattern check is a no-op until probabilities exist
                        davg = 1.0
                        configfilename = os.path.join(model_path,'datadetails.json')
                        filename = os.path.join(model_path,'clickstream.json')
                        clusterfilename = os.path.join(model_path,'stateClustering.csv')
                        probfilename = os.path.join(model_path,'stateTransitionProbability.csv')
                        dfclus = pd.read_csv(clusterfilename)
                        dfprod = pd.read_csv(probfilename)
                        with open(configfilename, "r") as f:
                            configSettingsJson = json.loads(f.read())
                        activity = configSettingsJson['activity']
                        sessionid = configSettingsJson['sessionid']
                        with open(filename, "r") as f:
                            configSettingsJson = json.loads(f.read())
                        groupswitching = configSettingsJson['groupswitching']
                        page_threshold = configSettingsJson['transitionprobability']
                        chain_count = configSettingsJson['transitionsequence']
                        chain_probability = configSettingsJson['sequencethreshold']
                        currentactivity = data[activity]
                        if bool(user_records):
                            sessionid = data[sessionid]
                            if sessionid != user_records['SessionID']:
                                # new session: reset the per-session tracking state
                                user_records['SessionID'] = sessionid
                                prevactivity = ''
                                user_records['probarry'] = []
                                user_records['prevclusterid'] = -1
                                user_records['NoOfClusterHopping'] = 0
                                user_records['pageclicks'] = 1
                            else:
                                prevactivity = user_records['Activity']
                            user_records['Activity'] = currentactivity
                            pageswitch = True
                            if prevactivity == currentactivity or prevactivity == '':
                                probability = 0
                                pageswitch = False
                                remarks = ''
                            else:
                                user_records['pageclicks'] += 1
                                df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)]
                                if df1.empty:
                                    remarks = 'Anomaly Detected - User in unusual state'
                                    anomaly = True
                                    clusterid = -1
                                    probability = 0
                                    user_records['probarry'].append(probability)
                                    n = int(chain_count)
                                    num_list = user_records['probarry'][-n:]
                                    davg = sum(num_list)/len(num_list)
                                    for index, row in dfclus.iterrows():
                                        clusterlist = row["clusterlist"]
                                        if currentactivity in clusterlist:
                                            clusterid = row["clusterid"]
                                else:
                                    probability = df1['Probability'].iloc[0]
                                    user_records['probarry'].append(probability)
                                    n = int(chain_count)
                                    num_list = user_records['probarry'][-n:]
                                    davg = sum(num_list)/len(num_list)
                                    for index, row in dfclus.iterrows():
                                        clusterlist = row["clusterlist"]
                                        if currentactivity in clusterlist:
                                            clusterid = row["clusterid"]
                                    remarks = ''
                            if user_records['prevclusterid'] != -1:
                                if probability == 0 and user_records['prevclusterid'] != clusterid:
                                    user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1
                                    if user_records['pageclicks'] == 1:
                                        remarks = 'Anomaly Detected - Frequent Cluster Hopping'
                                        anomaly = True
                                    else:
                                        remarks = 'Cluster Hopping Detected'
                                    user_records['pageclicks'] = 0
                                    if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False:
                                        remarks = 'Anomaly Detected - Multiple Cluster Hopping'
                                        anomaly = True
                                elif probability == 0:
                                    remarks = 'Anomaly Detected - Unusual State Transition Detected'
                                    anomaly = True
                                elif probability <= float(page_threshold):
                                    remarks = 'Anomaly Detected - Infrequent State Transition Detected'
                                    anomaly = True
                            else:
                                if pageswitch == True:
                                    if probability == 0:
                                        remarks = 'Anomaly Detected - Unusual State Transition Detected'
                                        anomaly = True
                                    elif probability <= float(page_threshold):
                                        remarks = 'Anomaly Detected - Infrequent State Transition Detected'
                                        anomaly = True
                                    else:
                                        remarks = ''
                            if davg < float(chain_probability):
                                if anomaly == False:
                                    remarks = 'Anomaly Detected - Infrequent Pattern Detected'
                                    anomaly = True
                        else:
                            user_records['SessionID'] = data[sessionid]
                            user_records['Activity'] = data[activity]
                            user_records['probability'] = 0
                            user_records['probarry'] = []
                            user_records['chainprobability'] = 0
                            user_records['prevclusterid'] = -1
                            user_records['NoOfClusterHopping'] = 0
                            user_records['pageclicks'] = 1
                            for index, row in dfclus.iterrows():
                                clusterlist = row["clusterlist"]
                                if currentactivity in clusterlist:
                                    clusterid = row["clusterid"]
                        user_records['prevclusterid'] = clusterid
                        outputStr = '{"status":"SUCCESS","data":{"Anomaly":"'+str(anomaly)+'","Remarks":"'+str(remarks)+'"}}'
                    elif operation.lower() == 'pattern_anomaly_settings':
                        data = json.loads(data)
                        groupswitching = data['groupswitching']
                        transitionprobability = data['transitionprobability']
                        transitionsequence = data['transitionsequence']
                        sequencethreshold = data['sequencethreshold']
                        filename = os.path.join(model_path,'clickstream.json')
                        data = {}
                        data['groupswitching'] = groupswitching
                        data['transitionprobability'] = transitionprobability
                        data['transitionsequence'] = transitionsequence
                        data['sequencethreshold'] = sequencethreshold
                        updatedConfig = json.dumps(data)
                        with open(filename, "w") as fpWrite:
                            fpWrite.write(updatedConfig)
                        outputStr = '{"Status":"SUCCESS"}'
                    else:
                        outputStr = '{"Status":"Error","Msg":"Operation not supported"}'
                else:
                    outputStr = '{"Status":"Error","Msg":"Model Not Present"}'
                resp = outputStr
                resp = resp+"\n"
                resp = resp.encode()
                self.send_response(200)
                self.send_header('Content-Type', 'application/json')
                self.end_headers()
                self.wfile.write(resp)
            else:
                self.do_AUTHHEAD()
                self.wfile.write(self.headers.get("Authorization").encode())
                resp = "Authentication Failed"
                resp = resp.encode()
                self.wfile.write(resp)
        else:
            print("python ==> else1")
            self.send_response(403)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
        print("PYTHON ######## REQUEST ####### ENDED")
        return

    def getModelFeatures(self,modelSignature):
        datajson = {'Body':'Gives the list of features'}
        home = expanduser("~")
        if platform.system() == 'Windows':
            predict_path = os.path.join(home,'AppData','Local','HCLT','AION','target',modelSignature,'featureslist.py')
        else:
            predict_path = os.path.join(home,'HCLT','AION','target',modelSignature,'featureslist.py')
        if os.path.isfile(predict_path):
            outputStr = subprocess.check_output([sys.executable,predict_path])
            outputStr = outputStr.decode('utf-8')
            outputStr = re.search(r'features:(.*)',str(outputStr), re.IGNORECASE).group(1)
            outputStr = outputStr.strip()
            displaymsg = outputStr
            #displaymsg = json.dumps(displaymsg)
            return (True, displaymsg)
        else:
            displaymsg = '{"status":"ERROR","msg":"Unable to fetch features"}'
            return (False, displaymsg)

    def getFeatures(self,modelSignature):
        datajson = {'Body':'Gives the list of features'}
        urltext = '/AION/UseCase_Version/features'
        if modelSignature != '':
            status, displaymsg = self.getModelFeatures(modelSignature)
            if status:
                urltext = '/AION/'+modelSignature+'/features'
            else:
                displaymsg = json.dumps(datajson)
        else:
            displaymsg = json.dumps(datajson)
        msg = """
URL:{url}
RequestType: POST
Content-Type=application/json
Output: {displaymsg}.
""".format(url=urltext, displaymsg=displaymsg)
        return msg

    def features_help(self,modelSignature):
        home = expanduser("~")
        if platform.system() == 'Windows':
            display_path = os.path.join(home,'AppData','Local','HCLT','AION','target',modelSignature,'display.json')
        else:
            display_path = os.path.join(home,'HCLT','AION','target',modelSignature,'display.json')
        #display_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'target',model,'display.json')
        datajson = {'Body':'Data Should be in JSON Format'}
        if os.path.isfile(display_path):
            with open(display_path) as file:
                config = json.load(file)
            datajson = {}
            for feature in config['numericalFeatures']:
                if feature != config['targetFeature']:
                    datajson[feature] = 'Numeric Value'
            for feature in config['nonNumericFeatures']:
                if feature != config['targetFeature']:
                    datajson[feature] = 'Category Value'
            for feature in config['textFeatures']:
                if feature != config['targetFeature']:
                    datajson[feature] = 'Category Value'
        displaymsg = json.dumps(datajson)
        return displaymsg

    def predict_help(self,modelSignature):
        if modelSignature != '':
            displaymsg = self.features_help(modelSignature)
            urltext = '/AION/'+modelSignature+'/predict'
        else:
            datajson = {'Body':'Data Should be in JSON Format'}
            displaymsg = json.dumps(datajson)
            urltext = '/AION/UseCase_Version/predict'
        msg = """
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: prediction,probability(if Applicable),remarks corresponding to each row.
""".format(url=urltext, displaymsg=displaymsg)
        return msg

    def performance_help(self,modelSignature):
        if modelSignature != '':
            urltext = '/AION/'+modelSignature+'/performance'
        else:
            urltext = '/AION/UseCase_Version/performance'
        datajson = {"trainingDataLocation":"Reference Data File Path","currentDataLocation":"Latest Data File Path"}
        displaymsg = json.dumps(datajson)
        msg = """
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: HTML File Path.""".format(url=urltext, displaymsg=displaymsg)
        return msg

    def monitoring_help(self,modelSignature):
        if modelSignature != '':
            urltext = '/AION/'+modelSignature+'/monitoring'
        else:
            urltext = '/AION/UseCase_Version/monitoring'
        datajson = {"trainingDataLocation":"Reference Data File Path","currentDataLocation":"Latest Data File Path"}
        displaymsg = json.dumps(datajson)
        msg = """
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: Affected Columns.
HTML File Path.""".format(url=urltext, displaymsg=displaymsg)
        return msg

    def explain_help(self,modelSignature):
        if modelSignature != '':
            displaymsg = self.features_help(modelSignature)
            urltext = '/AION/'+modelSignature+'/explain'
        else:
            datajson = {'Body':'Data Should be in JSON Format'}
            displaymsg = json.dumps(datajson)
            urltext = '/AION/UseCase_Version/explain'
        msg = """
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: anchor (Local Explanation),prediction,forceplot,multidecisionplot.""".format(url=urltext, displaymsg=displaymsg)
        return msg

    def help_text(self,modelSignature):
        predict_help = self.predict_help(modelSignature)
        explain_help = self.explain_help(modelSignature)
        features_help = self.getFeatures(modelSignature)
        monitoring_help = self.monitoring_help(modelSignature)
        performance_help = self.performance_help(modelSignature)
        msg = """
Following URL:

Prediction
{predict_help}

Local Explanation
{explain_help}

Features
{features_help}

Monitoring
{monitoring_help}

Performance
{performance_help}
""".format(predict_help=predict_help, explain_help=explain_help, features_help=features_help, monitoring_help=monitoring_help, performance_help=performance_help)
        return msg

    def do_GET(self):
        print("PYTHON ######## REQUEST ####### STARTED")
        if re.search('/AION/', self.path):
            self.send_response(200)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
            helplist = self.path.split('/')[-1]
            print(helplist)
            if helplist.lower() == 'help':
                model = self.path.split('/')[-2]
                if model.lower() == 'aion':
                    model = ''
                msg = self.help_text(model)
            elif helplist.lower() == 'predict':
                model = self.path.split('/')[-2]
                if model.lower() == 'aion':
                    model = ''
                msg = self.predict_help(model)
            elif helplist.lower() == 'explain':
                model = self.path.split('/')[-2]
                if model.lower() == 'aion':
                    model = ''
                msg = self.explain_help(model)
            elif helplist.lower() == 'monitoring':
                model = self.path.split('/')[-2]
                if model.lower() == 'aion':
                    model = ''
                msg = self.monitoring_help(model)
            elif helplist.lower() == 'performance':
                model = self.path.split('/')[-2]
                if model.lower() == 'aion':
                    model = ''
                msg = self.performance_help(model)
            elif helplist.lower() == 'features':
                model = self.path.split('/')[-2]
                if model.lower() == 'aion':
                    model = ''
                status, msg = self.getModelFeatures(model)
            else:
                model = self.path.split('/')[-2]
                if model.lower() == 'aion':
                    model = helplist
                msg = self.help_text(model)
            self.wfile.write(msg.encode())
        else:
            self.send_response(403)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
        return

class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    allow_reuse_address = True

    def shutdown(self):
        self.socket.close()
        HTTPServer.shutdown(self)

class SimpleHttpServer():
    def __init__(self, ip, port, username, password):
        handler_class = partial(HTTPRequestHandler, username=username, password=password,)
        self.server = ThreadedHTTPServer((ip, port), handler_class)

    def start(self):
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()

    def waitForThread(self):
        self.server_thread.join()

    def stop(self):
        self.server.shutdown()
        self.waitForThread()

def start_server(ip, port, username, password):
    server = SimpleHttpServer(ip, int(port), username, password)
    print('HTTP Server Running...........')
    server.start()
    server.waitForThread()
aion_online_pipeline.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import sys import json import datetime, time, timeit import logging logging.getLogger('tensorflow').disabled = True import shutil import warnings from config_manager.online_pipeline_config import OTAionConfigManager from records import pushrecords import logging import mlflow from pathlib import Path from pytz import timezone def pushRecordForOnlineTraining(): try: from appbe.pages import getversion status,msg = pushrecords.enterRecord(AION_VERSION) except Exception as e: print("Exception", e) status = False msg = str(e) return status,msg def mlflowSetPath(path,experimentname): import mlflow url = "file:" + str(Path(path).parent.parent) + "/mlruns" mlflow.set_tracking_uri(url) mlflow.set_experiment(str(experimentname)) class server(): def __init__(self): self.response = None self.dfNumCols=0 self.dfNumRows=0 self.features=[] self.mFeatures=[] self.emptyFeatures=[] self.vectorizerFeatures=[] self.wordToNumericFeatures=[] self.profilerAction = [] self.targetType = '' self.matrix1='{' self.matrix2='{' self.matrix='{' self.trainmatrix='{' self.numericalFeatures=[] self.nonNumericFeatures=[] self.similarGroups=[] self.method = 'NA' self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] self.modelSelTopFeatures=[] self.topFeatures=[] self.allFeatures=[] def startScriptExecution(self, config_obj): rowfilterexpression = '' grouperbyjson = '' model_tried='' learner_type = '' topics = {} numericContinuousFeatures='' discreteFeatures='' threshold=-1 targetColumn = '' categoricalFeatures='' dataFolderLocation = '' original_data_file = '' profiled_data_file = '' trained_data_file = '' predicted_data_file='' featureReduction = 'False' reduction_data_file='' params={} score = 0 labelMaps={} featureDataShape=[] self.riverModels = [] self.riverAlgoNames = ['Online Logistic Regression', 'Online Softmax Regression', 'Online Decision Tree Classifier', 'Online KNN Classifier', 'Online Linear Regression', 'Online Bayesian Linear Regression', 'Online Decision Tree Regressor','Online KNN Regressor'] #ConfigSettings iterName,iterVersion,dataLocation,deployLocation,delimiter,textqualifier = config_obj.getAIONLocationSettings() scoreParam = config_obj.getScoringCreteria() datetimeFeature,indexFeature,modelFeatures=config_obj.getFeatures() iterName = iterName.replace(" ", "_") deployLocation,dataFolderLocation,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,logFileName,outputjsonFile = config_obj.createDeploymentFolders(deployLocation,iterName,iterVersion) #Mlflow mlflowSetPath(deployLocation,iterName+'_'+iterVersion) #Logger filehandler = logging.FileHandler(logFileName, 'w','utf-8') formatter = logging.Formatter('%(message)s') filehandler.setFormatter(formatter) log = logging.getLogger('eion') log.propagate = False for hdlr in log.handlers[:]: # remove the existing file handlers if isinstance(hdlr,logging.FileHandler): log.removeHandler(hdlr) log.addHandler(filehandler) log.setLevel(logging.INFO) log.info('************* 
Version - v2.2.5 *************** \n') msg = '-------> Execution Start Time: '+ datetime.datetime.now(timezone("Asia/Kolkata")).strftime('%Y-%m-%d %H:%M:%S' + ' IST') log.info(msg) startTime = timeit.default_timer() try: output = {'bestModel': '', 'bestScore': 0, 'bestParams': {}} #ConfigSetting problemType,targetFeature,profilerStatus,selectorStatus,learnerStatus,visualizationstatus,deployStatus = config_obj.getModulesDetails() selectorStatus = False if(problemType.lower() in ['classification','regression']): if(targetFeature == ''): output = {"status":"FAIL","message":"Target Feature is Must for Classification and Regression Problem Type"} return output #DataReading from transformations.dataReader import dataReader objData = dataReader() if os.path.isfile(dataLocation): dataFrame = objData.csvTodf(dataLocation,delimiter,textqualifier) dataFrame.rename(columns=lambda x:x.strip(), inplace=True) #FilterDataframe filter = config_obj.getfilter() if filter != 'NA': dataFrame,rowfilterexpression = objData.rowsfilter(filter,dataFrame) #GroupDataframe timegrouper = config_obj.gettimegrouper() grouping = config_obj.getgrouper() if grouping != 'NA': dataFrame,grouperbyjson = objData.grouping(grouping,dataFrame) elif timegrouper != 'NA': dataFrame,grouperbyjson = objData.timeGrouping(timegrouper,dataFrame) #KeepOnlyModelFtrs dataFrame = objData.removeFeatures(dataFrame,datetimeFeature,indexFeature,modelFeatures,targetFeature) log.info('\n-------> First Ten Rows of Input Data: ') log.info(dataFrame.head(10)) self.dfNumRows=dataFrame.shape[0] self.dfNumCols=dataFrame.shape[1] dataLoadTime = timeit.default_timer() - startTime log.info('-------> COMPUTING: Total dataLoadTime time(sec) :'+str(dataLoadTime)) if profilerStatus: log.info('\n================== Data Profiler has started ==================') log.info('Status:-|... AION feature transformation started') dp_mlstart = time.time() profilerJson = config_obj.getEionProfilerConfigurarion() log.info('-------> Input dataFrame(5 Rows): ') log.info(dataFrame.head(5)) log.info('-------> DataFrame Shape (Row,Columns): '+str(dataFrame.shape)) from incremental.incProfiler import incProfiler incProfilerObj = incProfiler() dataFrame,targetColumn,self.mFeatures,self.numericalFeatures,self.nonNumericFeatures,labelMaps,self.configDict,self.textFeatures,self.emptyFeatures,self.wordToNumericFeatures = incProfilerObj.startIncProfiler(dataFrame,profilerJson,targetFeature,deployLocation,problemType) self.features = self.configDict['allFtrs'] log.info('-------> Data Frame Post Data Profiling(5 Rows): ') log.info(dataFrame.head(5)) log.info('Status:-|... AION feature transformation completed') dp_mlexecutionTime=time.time() - dp_mlstart log.info('-------> COMPUTING: Total Data Profiling Execution Time '+str(dp_mlexecutionTime)) log.info('================== Data Profiling completed ==================\n') dataFrame.to_csv(profiled_data_file,index=False) selectorStatus = False if learnerStatus: log.info('Status:-|... 
AION Learner data preparation started') ldp_mlstart = time.time() testPercentage = config_obj.getAIONTestTrainPercentage() balancingMethod = config_obj.getAIONDataBalancingMethod() from learner.machinelearning import machinelearning mlobj = machinelearning() modelType = problemType.lower() targetColumn = targetFeature if modelType == "na": if self.targetType == 'categorical': modelType = 'classification' elif self.targetType == 'continuous': modelType = 'regression' datacolumns=list(dataFrame.columns) if targetColumn in datacolumns: datacolumns.remove(targetColumn) features =datacolumns featureData = dataFrame[features] if targetColumn != '': targetData = dataFrame[targetColumn] xtrain,ytrain,xtest,ytest = mlobj.split_into_train_test_data(featureData,targetData,testPercentage,modelType) categoryCountList = [] if modelType == 'classification': if(mlobj.checkForClassBalancing(ytrain) >= 1): xtrain,ytrain = mlobj.ExecuteClassBalancing(xtrain,ytrain,balancingMethod) valueCount=targetData.value_counts() categoryCountList=valueCount.tolist() ldp_mlexecutionTime=time.time() - ldp_mlstart log.info('-------> COMPUTING: Total Learner data preparation Execution Time '+str(ldp_mlexecutionTime)) log.info('Status:-|... AION Learner data preparation completed') if learnerStatus: log.info('\n================== ML Started ==================') log.info('Status:-|... AION training started') log.info('-------> Memory Usage by DataFrame During Learner Status '+str(dataFrame.memory_usage(deep=True).sum())) mlstart = time.time() log.info('-------> Target Problem Type:'+ self.targetType) learner_type = 'ML' learnerJson = config_obj.getEionLearnerConfiguration() log.info('-------> Target Model Type:'+ modelType) modelParams,modelList = config_obj.getEionLearnerModelParams(modelType) if(modelType == 'regression'): allowedmatrix = ['mse','r2','rmse','mae'] if(scoreParam.lower() not in allowedmatrix): scoreParam = 'mse' if(modelType == 'classification'): allowedmatrix = ['accuracy','recall','f1_score','precision','roc_auc'] if(scoreParam.lower() not in allowedmatrix): scoreParam = 'accuracy' scoreParam = scoreParam.lower() from incremental.incMachineLearning import incMachineLearning incMlObj = incMachineLearning(mlobj) self.configDict['riverModel'] = False status,model_type,model,saved_model,matrix,trainmatrix,featureDataShape,model_tried,score,filename,self.features,threshold,pscore,rscore,self.method,loaded_model,xtrain1,ytrain1,xtest1,ytest1,topics,params=incMlObj.startLearning(learnerJson,modelType,modelParams,modelList,scoreParam,self.features,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,self.targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps) if model in self.riverAlgoNames: self.configDict['riverModel'] = True if(self.matrix != '{'): self.matrix += ',' if(self.trainmatrix != '{'): self.trainmatrix += ',' self.trainmatrix += trainmatrix self.matrix += matrix mlexecutionTime=time.time() - mlstart log.info('-------> Total ML Execution Time '+str(mlexecutionTime)) log.info('Status:-|... AION training completed') log.info('================== ML Completed ==================\n') if visualizationstatus: visualizationJson = config_obj.getEionVisualizationConfiguration() log.info('Status:-|... 
AION Visualizer started') visualizer_mlstart = time.time() from visualization.visualization import Visualization visualizationObj = Visualization(iterName,iterVersion,dataFrame,visualizationJson,datetimeFeature,deployLocation,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,self.features,targetFeature,model_type,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,self.vectorizerFeatures,self.textFeatures,self.numericalFeatures,self.nonNumericFeatures,self.emptyFeatures,self.dfNumRows,self.dfNumCols,saved_model,scoreParam,learner_type,model,featureReduction,reduction_data_file) visualizationObj.visualizationrecommandsystem() visualizer_mlexecutionTime=time.time() - visualizer_mlstart log.info('-------> COMPUTING: Total Visualizer Execution Time '+str(visualizer_mlexecutionTime)) log.info('Status:-|... AION Visualizer completed') try: os.remove(os.path.join(deployLocation,'aion_xai.py')) except: pass if deployStatus: if str(model) != 'None': log.info('\n================== Deployment Started ==================') log.info('Status:-|... AION Deployer started') deployPath = deployLocation deployer_mlstart = time.time() src = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','useCaseFiles') shutil.copy2(os.path.join(src,'incBatchLearning.py'),deployPath) os.rename(os.path.join(deployPath,'incBatchLearning.py'),os.path.join(deployPath,'aion_inclearning.py')) shutil.copy2(os.path.join(src,'incBatchPrediction.py'),deployPath) os.rename(os.path.join(deployPath,'incBatchPrediction.py'),os.path.join(deployPath,'aion_predict.py')) self.configDict['modelName'] = str(model) self.configDict['modelParams'] = params self.configDict['problemType'] = problemType.lower() self.configDict['score'] = score self.configDict['metricList'] = [] self.configDict['metricList'].append(score) self.configDict['trainRowsList'] = [] self.configDict['trainRowsList'].append(featureDataShape[0]) self.configDict['scoreParam'] = scoreParam self.configDict['partialFit'] = 0 with open(os.path.join(deployLocation,'production', 'Config.json'), 'w', encoding='utf8') as f: json.dump(self.configDict, f, ensure_ascii=False) deployer_mlexecutionTime=time.time() - deployer_mlstart log.info('-------> COMPUTING: Total Deployer Execution Time '+str(deployer_mlexecutionTime)) log.info('Status:-|... 
AION Batch Deployment completed') log.info('================== Deployment Completed ==================') # self.features = profilerObj.set_features(self.features,self.textFeatures,self.vectorizerFeatures) self.matrix += '}' self.trainmatrix += '}' matrix = eval(self.matrix) trainmatrix = eval(self.trainmatrix) model_tried = eval('['+model_tried+']') try: json.dumps(params) output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":params,"EvaluatedModels":model_tried,"LogFile":logFileName}} except: output_json = {"status":"SUCCESS","data":{"ModelType":modelType,"deployLocation":deployPath,"BestModel":model,"BestScore":str(score),"ScoreType":str(scoreParam).upper(),"matrix":matrix,"trainmatrix":trainmatrix,"featuresused":str(self.features),"targetFeature":str(targetColumn),"params":"","EvaluatedModels":model_tried,"LogFile":logFileName}} print(output_json) if bool(topics) == True: output_json['topics'] = topics with open(outputjsonFile, 'w') as f: json.dump(output_json, f) output_json = json.dumps(output_json) log.info('\n------------- Summary ------------') log.info('------->No of rows & columns in data:('+str(self.dfNumRows)+','+str(self.dfNumCols)+')') log.info('------->No of missing Features :'+str(len(self.mFeatures))) log.info('------->Missing Features:'+str(self.mFeatures)) log.info('------->Text Features:'+str(self.textFeatures)) log.info('------->No of Nonnumeric Features :'+str(len(self.nonNumericFeatures))) log.info('------->Non-Numeric Features :' +str(self.nonNumericFeatures)) if threshold == -1: log.info('------->Threshold: NA') else: log.info('------->Threshold: '+str(threshold)) log.info('------->Label Maps of Target Feature for classification :'+str(labelMaps)) if((learner_type != 'TS') & (learner_type != 'AR')): log.info('------->No of columns and rows used for Modeling :'+str(featureDataShape)) log.info('------->Features Used for Modeling:'+str(self.features)) log.info('------->Target Feature: '+str(targetColumn)) log.info('------->Best Model Score :'+str(score)) log.info('------->Best Parameters:'+str(params)) log.info('------->Type of Model :'+str(modelType)) log.info('------->Best Model :'+str(model)) log.info('------------- Summary ------------\n') except Exception as inst: log.info('server code execution failed !....'+str(inst)) output_json = {"status":"FAIL","message":str(inst).strip('"')} output_json = json.dumps(output_json) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) executionTime = timeit.default_timer() - startTime log.info('\nTotal execution time(sec) :'+str(executionTime)) log.info('\n------------- Output JSON ------------') log.info('-------> Output :'+str(output_json)) log.info('------------- Output JSON ------------\n') for hdlr in log.handlers[:]: # remove the existing file handlers if isinstance(hdlr,logging.FileHandler): hdlr.close() log.removeHandler(hdlr) return output_json def aion_ot_train_model(arg): warnings.filterwarnings('ignore') try: valid, msg = pushRecordForOnlineTraining() if valid: serverObj = server() configObj = OTAionConfigManager() jsonPath = arg readConfistatus,msg = configObj.readConfigurationFile(jsonPath) if(readConfistatus == False): output = 
{"status":"FAIL","message":str(msg).strip('"')} output = json.dumps(output) print("\n") print("aion_learner_status:",output) print("\n") return output output = serverObj.startScriptExecution(configObj) else: output = {"status":"LicenseVerificationFailed","message":str(msg).strip('"')} output = json.dumps(output) print("\n") print("aion_learner_status:",output) print("\n") return output except Exception as inst: output = {"status":"FAIL","message":str(inst).strip('"')} output = json.dumps(output) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) print("\n") print("aion_learner_status:",output) print("\n") return output if __name__ == "__main__": aion_ot_train_model(sys.argv[1])
aion_mlac.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import os from pathlib import Path os.chdir(Path(__file__).parent) import json import shutil from mlac.timeseries import app as ts_app from mlac.ml import app as ml_app import traceback def create_test_file(config): code_file = 'aionCode.py' text = """ from pathlib import Path import subprocess import sys import json import argparse def run_pipeline(data_path): print('Data Location:', data_path) cwd = Path(__file__).parent monitor_file = str(cwd/'ModelMonitoring'/'{code_file}') load_file = str(cwd/'DataIngestion'/'{code_file}') transformer_file = str(cwd/'DataTransformation'/'{code_file}') selector_file = str(cwd/'FeatureEngineering'/'{code_file}') train_folder = cwd register_file = str(cwd/'ModelRegistry'/'{code_file}') deploy_file = str(cwd/'ModelServing'/'{code_file}') print('Running modelMonitoring') cmd = [sys.executable, monitor_file, '-i', data_path] result = subprocess.check_output(cmd) result = result.decode('utf-8') print(result) result = json.loads(result[result.find('{search}'):]) if result['Status'] == 'Failure': exit() print('Running dataIngestion') cmd = [sys.executable, load_file] result = subprocess.check_output(cmd) result = result.decode('utf-8') print(result) result = json.loads(result[result.find('{search}'):]) if result['Status'] == 'Failure': exit() print('Running DataTransformation') cmd = [sys.executable, transformer_file] result = subprocess.check_output(cmd) result = result.decode('utf-8') print(result) result = json.loads(result[result.find('{search}'):]) if result['Status'] == 'Failure': exit() print('Running FeatureEngineering') cmd = [sys.executable, selector_file] result = subprocess.check_output(cmd) result = result.decode('utf-8') print(result) result = json.loads(result[result.find('{search}'):]) if result['Status'] == 'Failure': exit() train_models = [f for f in train_folder.iterdir() if 'ModelTraining' in f.name] for model in train_models: print('Running',model.name) cmd = [sys.executable, str(model/'{code_file}')] train_result = subprocess.check_output(cmd) train_result = train_result.decode('utf-8') print(train_result) print('Running ModelRegistry') cmd = [sys.executable, register_file] result = subprocess.check_output(cmd) result = result.decode('utf-8') print(result) result = json.loads(result[result.find('{search}'):]) if result['Status'] == 'Failure': exit() print('Running ModelServing') cmd = [sys.executable, deploy_file] result = subprocess.check_output(cmd) result = result.decode('utf-8') print(result) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-i', '--inputPath', help='path of the input data') args = parser.parse_args() if args.inputPath: filename = args.inputPath else: filename = r"{filename}" try: print(run_pipeline(filename)) except Exception as e: print(e) """.format(filename=config['dataLocation'],search='{"Status":',code_file=code_file) deploy_path = Path(config["deploy_path"])/'MLaC' deploy_path.mkdir(parents=True, exist_ok=True) py_file = deploy_path/"run_pipeline.py" with 
open(py_file, "w") as f: f.write(text) def is_module_in_req_file(mod, folder): status = False if (Path(folder)/'requirements.txt').is_file(): with open(folder/'requirements.txt', 'r') as f: status = mod in f.read() return status def copy_local_modules(config): deploy_path = Path(config["deploy_path"]) local_modules_location = config.get("local_modules_location", None) if local_modules_location: folder_loc = local_modules_location else: folder_loc = Path(__file__).parent/'local_modules' if not folder_loc.exists(): folder_loc = None if folder_loc: file = folder_loc/'config.json' if file.exists(): with open(file, 'r') as f: data = json.load(f) for key, values in data.items(): local_module = folder_loc/key if local_module.exists(): for folder in values: target_folder = Path(deploy_path)/'MLaC'/folder if target_folder.is_dir(): if is_module_in_req_file(key, target_folder): shutil.copy(local_module, target_folder) def validate(config): error = '' if 'error' in config.keys(): error = config['error'] return error def generate_mlac_code(config): with open(config, 'r') as f: config = json.load(f) error = validate(config) if error: raise ValueError(error) if config['problem_type'] in ['classification','regression']: return generate_mlac_ML_code(config) elif config['problem_type'].lower() == 'timeseriesforecasting': #task 11997 return generate_mlac_TS_code(config) def generate_mlac_ML_code(config): try: ml_app.run_loader(config) ml_app.run_transformer(config) ml_app.run_selector(config) ml_app.run_trainer(config) ml_app.run_register(config) ml_app.run_deploy(config) ml_app.run_drift_analysis(config) copy_local_modules(config) create_test_file(config) status = {'Status':'SUCCESS','MLaC_Location':str(Path(config["deploy_path"])/'MLaC')} except Exception as Inst: status = {'Status':'Failure','msg':str(Inst)} traceback.print_exc() status = json.dumps(status) return(status) def generate_mlac_TS_code(config): try: ts_app.run_loader(config) ts_app.run_transformer(config) ts_app.run_selector(config) ts_app.run_trainer(config) ts_app.run_register(config) ts_app.run_deploy(config) ts_app.run_drift_analysis(config) create_test_file(config) status = {'Status':'SUCCESS','MLaC_Location':str(Path(config["deploy_path"])/'MLaC')} except Exception as Inst: status = {'Status':'Failure','msg':str(Inst)} traceback.print_exc() status = json.dumps(status) return(status)