openai_embedding.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022, 2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import openai
import tiktoken
import numpy as np
import pandas as pd
from pathlib import Path
from openai.embeddings_utils import get_embedding
from sklearn.base import BaseEstimator, TransformerMixin


class embedding(BaseEstimator, TransformerMixin):

    def __init__(self, embedding_engine='Text-Embedding', embedding_ctx_size=8191, encoding_method='cl100k_base'):
        self.embedding_engine = embedding_engine
        self.embedding_ctx_size = embedding_ctx_size
        self.encoding_method = encoding_method
        self.number_of_features = 1536

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        setup_openai()
        X = map(lambda text: self.len_safe_get_embedding(text), X)
        return list(X)

    def split_large_text(self, large_text):
        # Split the token stream into chunks that fit within the embedding context window.
        encoding = tiktoken.get_encoding(self.encoding_method)
        tokenized_text = encoding.encode(large_text)

        chunks = []
        current_chunk = []
        current_length = 0
        for token in tokenized_text:
            current_chunk.append(token)
            current_length += 1
            if current_length >= self.embedding_ctx_size:
                chunks.append(encoding.decode(current_chunk).rstrip(' .,;'))
                current_chunk = []
                current_length = 0

        if current_chunk:
            chunks.append(encoding.decode(current_chunk).rstrip(' .,;'))

        return chunks

    def len_safe_get_embedding(self, text):
        chunk_embeddings = []
        chunk_lens = []
        for chunk in self.split_large_text(text):
            chunk_embeddings.append(get_embedding(chunk, engine=self.embedding_engine))
            chunk_lens.append(len(chunk))

        # Weight each chunk's embedding by its length so long chunks are not
        # under-represented (chunk_lens was computed but unused in the original,
        # which passed weights=None).
        chunk_embeddings = np.average(chunk_embeddings, axis=0, weights=chunk_lens)
        chunk_embeddings = chunk_embeddings / np.linalg.norm(chunk_embeddings)  # normalizes length to 1
        chunk_embeddings = chunk_embeddings.tolist()
        return chunk_embeddings

    def get_feature_names_out(self):
        return [str(x) for x in range(self.number_of_features)]

    def get_feature_names(self):
        return self.get_feature_names_out()


"""
OpenAI initialization has to be done separately as follows:
1. During training, read the parameters from the user
    a. from config
    b. SQLite database
    c. from JSON file
"""


class setup_openai():

    def __init__(self, config=None):
        param_keys = ['api_type', 'api_key', 'api_base', 'api_version']
        if isinstance(config, dict):
            valid_params = {x: y for x, y in config.items() if x in param_keys}
            self._update_params(valid_params)
        elif self._is_sqlite():
            self._update_params(self._get_cred_from_sqlite())
        elif ((Path(__file__).parent.parent/'etc')/'openai.json').exists():
            with open(((Path(__file__).parent.parent/'etc')/'openai.json'), 'r') as f:
                import json
                params = json.load(f)
                valid_params = {x: y for x, y in params.items() if x in param_keys}
                self._update_params(valid_params)
        else:
            raise ValueError('Open AI credentials are not provided.')

    def _is_sqlite(self):
        try:
            from AION.appbe.sqliteUtility import sqlite_db
            from AION.appbe.dataPath import DATA_DIR
            db_dir = Path(DATA_DIR)/'sqlite'
            db_file = 'config.db'
            if (db_dir/db_file).exists():
                sqlite_obj = sqlite_db(db_dir, db_file)
                if sqlite_obj.table_exists('openai'):
                    return True
            return False
        except:
            return False

    def _get_cred_from_sqlite(self):
        from AION.appbe.sqliteUtility import sqlite_db
        from AION.appbe.dataPath import DATA_DIR
        db_dir = Path(DATA_DIR)/'sqlite'
        db_file = 'config.db'
        sqlite_obj = sqlite_db(db_dir, db_file)
        data = sqlite_obj.read_data('openai')[0]
        param_keys = ['api_type', 'api_key', 'api_base', 'api_version']
        return dict((x, y) for x, y in zip(param_keys, data))

    def _update_params(self, valid_params):
        for key, value in valid_params.items():
            if key == 'api_type':
                openai.api_type = value
            elif key == 'api_key':
                openai.api_key = value
            elif key == 'api_base':
                openai.api_base = value
            elif key == 'api_version':
                openai.api_version = value
eda.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022, 2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import sys
import logging
from collections import Counter
import spacy
import numpy as np
import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk import pos_tag
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from textblob import TextBlob
from sklearn.feature_extraction.text import CountVectorizer

'''
nltk.download("punkt")
nltk.download("wordnet")
'''

stopWords = stopwords.words("english")


class ExploreTextData:

    def __init__(self, logEnabled=False):
        self.logEnabled = logEnabled

    def __Log(self, logType="info", text=None):
        if logType.lower() == "exception":
            logging.exception(text)
        elif self.logEnabled:
            if logType.lower() == "info":
                logging.info(text)
            elif logType.lower() == "debug":
                logging.debug(text)

    def Describe(self, inputCorpus):
        """
        Generate descriptive statistics for the length of documents.

        Parameters
        ----------
        inputCorpus: sequence of input documents where each
            document consists of paragraphs or sentences

        Returns
        -------
        pandas.DataFrame
            Summary statistics of the Series or DataFrame provided.
        """
        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            stat = {}
            word_count = self.DocumentWordCount(inputCorpus)
            stat['count'] = float(len(word_count))
            stat['mean'] = float(word_count.mean())
            stat['std'] = float(word_count.std())
            stat['max'] = float(word_count.max())
            stat['min'] = float(word_count.min())
            return pd.DataFrame.from_dict(stat, orient='index')
        except:
            self.__Log("exception", sys.exc_info())
            raise

    def DocumentLength(self, inputCorpus):
        """
        Calculate the length of each document in corpus.

        Parameters
        ----------
        inputCorpus: sequence of input documents where each
            document consists of paragraphs or sentences

        Returns
        -------
        pandas.Series of {int}
            series of length of documents
        """
        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            return inputCorpus.str.len()
        except:
            self.__Log("exception", sys.exc_info())
            raise

    def DocumentWordCount(self, inputCorpus):
        """
        Calculate the number of words in each document in corpus.

        Parameters
        ----------
        inputCorpus: sequence of input documents where each
            document consists of paragraphs or sentences

        Returns
        -------
        pandas.Series of {int}
            series of number of words in documents
        """
        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            return inputCorpus.str.split().map(lambda x: len(x))
        except:
            self.__Log("exception", sys.exc_info())
            raise

    def AverageWordLength(self, inputCorpus):
        """
        Calculate the average length of words in each document in corpus.

        Parameters
        ----------
        inputCorpus: sequence of input documents where each
            document consists of paragraphs or sentences

        Returns
        -------
        pandas.Series of {double}
            series of average length of words in documents
        """
        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            return inputCorpus.str.split()\
                .apply(lambda x: [len(i) for i in x])\
                .map(lambda x: np.mean(x))
        except:
            self.__Log("exception", sys.exc_info())
            raise

    def StopWordsCount(self, inputCorpus):
        """
        Calculate the number of stopwords in each document in corpus.

        Parameters
        ----------
        inputCorpus: sequence of input documents where each
            document consists of paragraphs or sentences

        Returns
        -------
        pandas.Series of {int}
            series of count of stopwords in documents
        """
        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            stopWordsCount = []
            inputCorpus = list(inputCorpus)
            for doc in inputCorpus:
                count = 0
                for word in doc.split():
                    if word in stopWords:
                        count += 1
                stopWordsCount.append(count)
            return pd.Series(stopWordsCount)
        except:
            self.__Log("exception", sys.exc_info())
            raise

    def MostCommonWords(self, inputCorpus, num_of_words=40):
        """
        Get the most common words in corpus.

        Parameters
        ----------
        inputCorpus: sequence of input documents where each
            document consists of paragraphs or sentences
        num_of_words: int, optional
            number of words to be returned

        Returns
        -------
        Pandas.DataFrame{string, int}
            Dataframe with columns "most_common_words" and "freq"
        """
        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            new = inputCorpus.str.split()
            new = new.values.tolist()
            corpus = [word for i in new for word in i if word not in stopWords]
            counter = Counter(corpus)
            most = counter.most_common()
            x, y = [], []
            for word, count in most[: num_of_words + 1]:
                x.append(word)
                y.append(count)
            return pd.DataFrame([x, y], index=['most_common_words', 'freq']).T
        except:
            self.__Log("exception", sys.exc_info())
            raise

    def NullCount(self, inputCorpus):
        """
        Calculate the number of null entries in corpus.

        Parameters
        ----------
        inputCorpus: sequence of input documents where each
            document consists of paragraphs or sentences

        Returns
        -------
        int
            count of null entries in corpus
        """
        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            return pd.Series(inputCorpus.isnull().sum())
        except:
            self.__Log("exception", sys.exc_info())
            raise

    def TopNgram(self, inputCorpus, ngram, num_of_words=10):
        """
        Get the top words from the ngrams.

        Parameters
        ----------
        inputCorpus: sequence of input documents where each
            document consists of paragraphs or sentences
        ngram: int
            ngram required
        num_of_words: int, optional
            number of words to be returned

        Returns
        -------
        Pandas.DataFrame{string, int}
            Dataframe with columns "ngram_words" and "freq"
        """
        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            words = []
            for doc in inputCorpus:
                word = [w for w in word_tokenize(doc) if (w not in stopWords)]
                words.append(" ".join(word))
            vec = CountVectorizer(ngram_range=(ngram, ngram)).fit(words)
            # Count ngrams in the same stopword-filtered documents used to build the
            # vocabulary (the original transformed the raw corpus, which undercounts
            # ngrams that only exist after stopword removal).
            bag_of_words = vec.transform(words)
            sum_words = bag_of_words.sum(axis=0)
            words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
            words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)[:num_of_words]
            words = []
            frequency = []
            for word, freq in words_freq:
                words.append(word)
                frequency.append(freq)
            return pd.DataFrame([words, frequency], index=['ngram_words', 'freq']).T
        except:
            self.__Log("exception", sys.exc_info())
            raise

    def Polarity(self, inputCorpus):
        """
        Get the polarity of the text.

        Parameters
        ----------
        inputCorpus: sequence of input documents where each
            document consists of paragraphs or sentences

        Returns
        -------
        pandas.Series {double}
            series of calculated polarity of the documents
        """
        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            return inputCorpus.apply(lambda x: TextBlob(x).sentiment.polarity)
        except:
            self.__Log("exception", sys.exc_info())
            raise

    def ReadabilityScore(self, inputCorpus):
        """
        Get the Readability Score of the text.

        Parameters
        ----------
        inputCorpus: sequence of input documents where each
            document consists of paragraphs or sentences

        Returns
        -------
        pandas.Series {double}
            series of calculated Readability Score of the documents
        """
        import textstat
        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            if isinstance(inputCorpus, pd.Series):
                return pd.Series([textstat.flesch_reading_ease(text) for text in inputCorpus])
            else:
                return [textstat.flesch_reading_ease(inputCorpus)]
        except:
            self.__Log("exception", sys.exc_info())
            raise

    def TagEntityCount(self, inputCorpus):
        """
        Calculate the frequency of each entity present in documents.

        Parameters
        ----------
        inputCorpus: sequence of input documents where each
            document consists of paragraphs or sentences

        Returns
        -------
        Pandas.DataFrame{string, int}
            Dataframe with columns "entity" and "freq"
        """
        def ner(text):
            doc = nlp(text)
            return [X.label_ for X in doc.ents]

        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            nlp = spacy.load("en_core_web_sm")
            ent = inputCorpus.apply(lambda x: ner(x))
            ent = [x for sub in ent for x in sub]
            counter = Counter(ent)
            count = counter.most_common()
            x, y = map(list, zip(*count))
            return pd.DataFrame([x, y], index=['entity', 'freq']).T
        except:
            self.__Log("exception", sys.exc_info())
            raise

    def MostCommonTokenPerEntity(self, inputCorpus, entity="GPE"):
        """
        Get the frequency of most common words corresponding to the specified entity in documents.

        Parameters
        ----------
        inputCorpus: sequence of input documents where each
            document consists of paragraphs or sentences
        entity: string, optional
            name of the entity corresponding to which words are counted

        Returns
        -------
        Pandas.DataFrame{string, int}
            Dataframe with columns "token" and "freq"
        """
        def ner(text, ent):
            doc = nlp(text)
            return [X.text for X in doc.ents if X.label_ == ent]

        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            nlp = spacy.load("en_core_web_sm")
            gpe = inputCorpus.apply(lambda x: ner(x, entity.upper()))
            gpe = [i for x in gpe for i in x]
            counter = Counter(gpe)
            x, y = map(list, zip(*counter.most_common(10)))
            return pd.DataFrame([x, y], index=['token', 'freq']).T
        except:
            self.__Log("exception", sys.exc_info())
            raise

    def MostCommonPosTag(self, inputCorpus):
        """
        Get the frequency of most common POS tag present in documents.

        Parameters
        ----------
        inputCorpus: sequence of input documents where each
            document consists of paragraphs or sentences

        Returns
        -------
        Pandas.DataFrame{string, int}
            Dataframe with columns "postag" and "freq"
        """
        def pos(text):
            pos = pos_tag(word_tokenize(text))
            pos = list(map(list, zip(*pos)))[1]
            return pos

        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            tags = inputCorpus.apply(lambda x: pos(x))
            tags = [x for l in tags for x in l]
            counter = Counter(tags)
            x, y = list(map(list, zip(*counter.most_common(7))))
            return pd.DataFrame([x, y], index=['postag', 'freq']).T
        except:
            self.__Log("exception", sys.exc_info())
            raise

    def MostCommonWordsInPOSTag(self, inputCorpus, tag="NN"):
        """
        Get the frequency of most common words related to specified POS tag present in documents.

        Parameters
        ----------
        inputCorpus: sequence of input documents where each
            document consists of paragraphs or sentences
        tag: string, optional
            POS tag corresponding to which words frequency will be calculated

        Returns
        -------
        Pandas.DataFrame{string, int}
            Dataframe with columns "words" and "freq"
        """
        def get_POSTag(text, tag):
            adj = []
            pos = pos_tag(word_tokenize(text))
            for word, tg in pos:
                if tg == tag:
                    adj.append(word)
            return adj

        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            words = inputCorpus.apply(lambda x: get_POSTag(x, tag.upper()))
            words = [x for l in words for x in l]
            counter = Counter(words)
            x = []
            y = []
            if len(counter):
                x, y = list(map(list, zip(*counter.most_common(7))))
            return pd.DataFrame([x, y], index=['words', 'freq']).T
        except:
            self.__Log("exception", sys.exc_info())
            raise

    def __preprocessData(self, inputCorpus):
        """
        Prepare the data for topic modelling.
        """
        try:
            self.__Log("info", "Start of {} function".format(sys._getframe().f_code.co_name))
            corpus = []
            lem = WordNetLemmatizer()
            for doc in inputCorpus:
                words = [w for w in word_tokenize(doc) if (w not in stopWords)]
                words = [lem.lemmatize(w) for w in words if len(w) > 2]
                corpus.append(words)
            return corpus
        except:
            self.__Log("exception", sys.exc_info())
            raise
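A brief usage sketch (hypothetical; the sample sentences are placeholders). The class is driven with a pandas Series, which is what the `.str` accessors in the methods above assume; the nltk `punkt` and `stopwords` data must already be downloaded.

# Hypothetical usage sketch; the corpus is a placeholder.
import pandas as pd
from eda import ExploreTextData

corpus = pd.Series([
    "The quick brown fox jumps over the lazy dog.",
    "Pack my box with five dozen liquor jugs.",
])

explorer = ExploreTextData()
print(explorer.Describe(corpus))              # word-count summary statistics
print(explorer.MostCommonWords(corpus, 5))    # top non-stopword terms with frequencies
print(explorer.TopNgram(corpus, ngram=2))     # most frequent bigrams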
TextCleaning.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022, 2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import re
import string
import sys
import demoji
#demoji.download_codes()
import nltk
import spacy
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
from text_unidecode import unidecode
from textblob import TextBlob
from spellchecker import SpellChecker
from nltk import pos_tag
from nltk.tokenize import word_tokenize, sent_tokenize  # sent_tokenize added: SentenceTokenize below relies on it
from nltk.corpus import wordnet
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from spacy.lang.en import English
from collections import defaultdict
import contractions

spacy_nlp = None


def WordTokenize(inputText, tokenizationLib='nltk'):
    tokenList = []
    if inputText is not None and inputText != "":
        tokenizationLib = tokenizationLib.lower()
        if tokenizationLib == 'nltk':
            tokenList = word_tokenize(inputText)
        elif tokenizationLib == 'textblob':
            tbObj = TextBlob(inputText)
            tokenList = tbObj.words
        elif tokenizationLib == 'spacy':
            nlp = English()
            nlpDoc = nlp(inputText)
            for token in nlpDoc:
                tokenList.append(token.text)
        elif tokenizationLib == 'keras':
            from tensorflow.keras.preprocessing.text import text_to_word_sequence
            tokenList = text_to_word_sequence(inputText)
        else:
            tokenList = word_tokenize(inputText)
    return tokenList


def SentenceTokenize(inputText):
    sentenceList = []
    if inputText is not None and inputText != "":
        sentenceList = sent_tokenize(inputText)
    return sentenceList


def Lemmatize(inputTokensList, lemmatizationLib='nltk'):
    global spacy_nlp
    lemmatized_list = []
    lemmatizationLib = lemmatizationLib.lower()
    if (inputTokensList is not None) and (len(inputTokensList) != 0):
        if (lemmatizationLib == 'textblob'):
            inputText = " ".join(inputTokensList)
            sent = TextBlob(inputText)
            tag_dict = {"J": 'a', "N": 'n', "V": 'v', "R": 'r'}
            words_and_tags = [(w, tag_dict.get(pos[0], 'n')) for w, pos in sent.tags]
            lemmatized_list = [wd.lemmatize(tag) for wd, tag in words_and_tags]
        # elif (was a bare if in the original, so the nltk fallback below
        # silently overwrote the textblob result)
        elif (lemmatizationLib == 'spacy'):
            inputText = " ".join(inputTokensList)
            if spacy_nlp == None:
                spacy_nlp = spacy.load('en_core_web_sm')
            doc = spacy_nlp(inputText)
            for token in doc:
                if token.text != token.lemma_:
                    if token.lemma_ != "-PRON-":
                        lemmatized_list.append(token.lemma_)
                    else:
                        lemmatized_list.append(token.text)
                else:
                    lemmatized_list.append(token.text)
        else:
            tag_map = defaultdict(lambda: wordnet.NOUN)
            tag_map['J'] = wordnet.ADJ
            tag_map['V'] = wordnet.VERB
            tag_map['R'] = wordnet.ADV
            wnLemmatizer = WordNetLemmatizer()
            token_tags = pos_tag(inputTokensList)
            lemmatized_list = [wnLemmatizer.lemmatize(token, tag_map[tag[0]]) for token, tag in token_tags]
    return lemmatized_list


def Stemmize(inputTokensList):
    stemmedTokensList = []
    if (inputTokensList is not None) and (len(inputTokensList) != 0):
        porterStemmer = PorterStemmer()
        stemmedTokensList = [porterStemmer.stem(token) for token in inputTokensList]
    return stemmedTokensList


def ToLowercase(inputText):
    resultText = ""
    if inputText is not None and inputText != "":
        resultText = inputText.lower()
    return resultText


def ToUppercase(inputText):
    resultText = ""
    if inputText is not None and inputText != '':
        resultText = inputText.upper()
    return resultText


def RemoveNoise(inputText,
                removeNoise_fHtmlDecode=True,
                removeNoise_fRemoveHyperLinks=True,
                removeNoise_fRemoveMentions=True,
                removeNoise_fRemoveHashtags=True,
                removeNoise_RemoveOrReplaceEmoji='remove',
                removeNoise_fUnicodeToAscii=True,
                removeNoise_fRemoveNonAscii=True):
    if inputText is not None and inputText != "":
        if removeNoise_fHtmlDecode == True:
            inputText = BeautifulSoup(inputText, "html.parser").text
        if removeNoise_fRemoveHyperLinks == True:
            inputText = re.sub(r'https?:\/\/\S*', '', inputText, flags=re.MULTILINE)
        if removeNoise_fRemoveMentions == True:
            inputText = re.sub(r'[@]+\S+', '', inputText)
        if removeNoise_fRemoveHashtags == True:
            inputText = re.sub(r'[#]+\S+', '', inputText)
        if removeNoise_RemoveOrReplaceEmoji == 'remove':
            inputText = demoji.replace(inputText, "")
        elif removeNoise_RemoveOrReplaceEmoji == 'replace':
            inputText = demoji.replace_with_desc(inputText, " ")
        if removeNoise_fUnicodeToAscii == True:
            inputText = unidecode(inputText)
        if removeNoise_fRemoveNonAscii == True:
            inputText = re.sub(r'[^\x00-\x7F]+', ' ', inputText)
        inputText = re.sub(r'\s+', ' ', inputText)
        inputText = inputText.strip()
    return inputText


def RemoveStopwords(inputTokensList, stopwordsRemovalLib='nltk', stopwordsList=None, extend_or_replace='extend'):
    resultTokensList = []
    if (inputTokensList is not None) and (len(inputTokensList) != 0):
        stopwordsRemovalLib = stopwordsRemovalLib.lower()
        if stopwordsRemovalLib == 'spacy':
            nlp = English()
            stopwordRemovalList = nlp.Defaults.stop_words
        else:
            stopwordRemovalList = set(stopwords.words('english'))
        if extend_or_replace == 'replace':
            if stopwordsList is not None:
                stopwordRemovalList = set(stopwordsList)
        else:
            if stopwordsList:
                stopwordRemovalList = stopwordRemovalList.union(set(stopwordsList))
        resultTokensList = [word for word in inputTokensList if word not in stopwordRemovalList]
    return resultTokensList


def RemoveNumericTokens(inputText, removeNumeric_fIncludeSpecialCharacters=True):
    resultText = ""
    if inputText is not None and inputText != "":
        if removeNumeric_fIncludeSpecialCharacters == True:
            #Remove tokens having numbers and punctuations
            resultText = re.sub(r'\b\d+[^a-zA-Z]*\d*\b', ' ', inputText)
        else:
            #Remove only numeric tokens
            resultText = re.sub(r'\b\d+\b', '', inputText)
        # convert consecutive whitespaces to single space in the results
        resultText = re.sub(r'\s+', ' ', resultText)
    return resultText


def RemovePunctuation(inputText, fRemovePuncWithinTokens=False):
    resultText = ""
    if inputText is not None and len(inputText) != 0:
        if fRemovePuncWithinTokens == True:
            resultText = inputText.translate(str.maketrans("", "", string.punctuation))
        else:
            punctuationList = list(string.punctuation)
            tokensList = WordTokenize(inputText)
            resultTokensList = [word for word in tokensList if word not in punctuationList]
            resultText = " ".join(resultTokensList)
        resultText = re.sub(r'\s+', ' ', resultText)
    return resultText


def CorrectSpelling(inputTokensList):
    correctedTokensList = []
    if (inputTokensList is not None) and (len(inputTokensList) != 0):
        spell = SpellChecker()
        for word in inputTokensList:
            word = word.lower()
            if word not in spell:
                word = spell.correction(word)
            if word:
                correctedTokensList.append(word)
    return correctedTokensList


def ReplaceAcronym(inputTokensList, acrDict=None):
    resultTokensList = []
    if (inputTokensList is not None) and (len(inputTokensList) != 0):
        if ((acrDict is not None) and (len(acrDict) != 0)):
            acrDictLowercase = dict((key.lower(), value.lower()) for key, value in acrDict.items())
            resultTokensList = [acrDictLowercase.get(token.lower(), token.lower()) for token in inputTokensList]
        else:
            resultTokensList = inputTokensList
    return resultTokensList


def ExpandContractions(inputText):
    resultText = ""
    if inputText != '':
        resultText = contractions.fix(inputText)
    return resultText


def cleanText(inputText,
              functionSequence=['RemoveNoise', 'ExpandContractions', 'Normalize', 'ReplaceAcronym',
                                'CorrectSpelling', 'RemoveStopwords', 'RemovePunctuation', 'RemoveNumericTokens'],
              fRemoveNoise=True, fExpandContractions=False, fNormalize=True, fReplaceAcronym=False,
              fCorrectSpelling=False, fRemoveStopwords=True, fRemovePunctuation=True, fRemoveNumericTokens=True,
              removeNoise_fHtmlDecode=True, removeNoise_fRemoveHyperLinks=True, removeNoise_fRemoveMentions=True,
              removeNoise_fRemoveHashtags=True, removeNoise_RemoveOrReplaceEmoji='remove',
              removeNoise_fUnicodeToAscii=True, removeNoise_fRemoveNonAscii=True,
              tokenizationLib='nltk', normalizationMethod='Lemmatization', lemmatizationLib='nltk',
              acronymDict=None, stopwordsRemovalLib='nltk', stopwordsList=None,
              extend_or_replace_stopwordslist='extend', removeNumeric_fIncludeSpecialCharacters=True,
              fRemovePuncWithinTokens=False):
    if inputText is not None and inputText != "":
        for function in functionSequence:
            if function == 'RemoveNoise':
                if (fRemoveNoise == True):
                    inputText = RemoveNoise(inputText, removeNoise_fHtmlDecode, removeNoise_fRemoveHyperLinks,
                                            removeNoise_fRemoveMentions, removeNoise_fRemoveHashtags,
                                            removeNoise_RemoveOrReplaceEmoji, removeNoise_fUnicodeToAscii,
                                            removeNoise_fRemoveNonAscii)
            if function == 'ExpandContractions':
                if (fExpandContractions == True):
                    inputText = ExpandContractions(inputText)
            if function == 'Normalize':
                if (fNormalize == True):
                    inputTokens = WordTokenize(inputText, tokenizationLib)
                    if (normalizationMethod == 'Stemming'):
                        inputTokens = Stemmize(inputTokens)
                    else:
                        inputTokens = Lemmatize(inputTokens, lemmatizationLib)
                    inputText = " ".join(inputTokens)
            if function == 'ReplaceAcronym':
                if fReplaceAcronym == True and (acronymDict is not None) and acronymDict != 'None':
                    inputText = ToLowercase(inputText)
                    inputTokens = WordTokenize(inputText, tokenizationLib)
                    inputTokens = ReplaceAcronym(inputTokens, acronymDict)
                    inputText = " ".join(inputTokens)
            if function == 'CorrectSpelling':
                if (fCorrectSpelling == True):
                    try:
                        inputTokens = WordTokenize(inputText, tokenizationLib)
                        inputTokens = CorrectSpelling(inputTokens)
                        inputText = " ".join(inputTokens)
                    except Exception as e:
                        print(e)
                        pass
            if function == 'RemoveStopwords':
                if (fRemoveStopwords == True):
                    inputText = ToLowercase(inputText)
                    inputTokens = WordTokenize(inputText, tokenizationLib)
                    inputTokens = RemoveStopwords(inputTokens, stopwordsRemovalLib, stopwordsList,
                                                  extend_or_replace_stopwordslist)
                    inputText = " ".join(inputTokens)
            if function == 'RemovePunctuation':
                if (fRemovePunctuation == True):
                    inputText = RemovePunctuation(inputText, fRemovePuncWithinTokens)
            if function == 'RemoveNumericTokens':
                if (fRemoveNumericTokens == True):
                    inputText = RemoveNumericTokens(inputText, removeNumeric_fIncludeSpecialCharacters)
        inputText = ToLowercase(inputText)
    return inputText
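A short usage sketch (hypothetical; the sample text is a placeholder). cleanText applies the steps in the order given by functionSequence, gated by the corresponding f* flags, and lowercases the result at the end:

# Hypothetical usage sketch; the sample text is a placeholder.
from TextCleaning import cleanText

raw = "Check https://example.com!! I'm SO excited #launch @team 12345"
cleaned = cleanText(raw, fExpandContractions=True)
print(cleaned)   # hyperlinks/mentions/hashtags removed, lemmatized,
                 # stopwords/punctuation/numeric tokens dropped, lowercased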
timeseries.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022, 2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
#Python sklearn & std libraries
import numpy as np
import pandas as pd
from time_series.ts_arima_eion import eion_arima
from time_series.aion_fbprophet import aion_fbprophet
from time_series.timeseriesDLUnivariate import timeseriesDLUnivariate
from time_series.timeseriesDLMultivariate import timeseriesDLMultivariate
from time_series.tsDLMultiVrtInUniVrtOut import tsDLMultiVrtInUniVrtOut
from statsmodels.tsa.vector_ar.vecm import coint_johansen
from statsmodels.tsa.vector_ar.var_model import VAR
from math import *
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from math import sqrt
import logging
import os
import sys
import time
import pickle
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.stattools import adfuller
import pmdarima as pm
from statsmodels.tsa.stattools import grangercausalitytests
from statsmodels.stats.stattools import durbin_watson
from time_series.ts_modelvalidation import timeseriesModelTests
from sklearn.utils import check_array
from time_series.tsStationarySeasonalityTest import tsStationarySeasonalityTest


class timeseries():

    def __init__(self, tsConfig, modelconfig, modelList, data, targetFeature, dateTimeFeature, modelName,
                 trainPercentage, usecasename, version, deployLocation, scoreParam):
        self.tsConfig = tsConfig
        self.modelconfig = modelconfig
        self.modelList = modelList
        self.data = data
        self.data1 = data
        self.pred_freq = ''
        self.additional_regressors = ''
        self.trainPercentage = trainPercentage
        self.targetFeature = targetFeature
        self.dateTimeFeature = dateTimeFeature
        self.modelName = modelName
        self.usecasename = usecasename
        self.model_fit = None
        self.selectedColumns = ''
        self.version = version
        self.deployLocation = deployLocation
        self.dictDiffCount = {}
        self.log = logging.getLogger('eion')
        self.scoreParam = str(scoreParam)
        try:  ##For bug:12280
            self.data.dropna(how='all', axis=1, inplace=True)
        except Exception as e:
            # fillna must be applied in place; the original bare call was a no-op
            self.data.fillna(0, inplace=True)
            self.log.info("data empty feature process error info: check any text column contain empty records. if yes, please remove the column and upload the data for time series forecasting. \n"+str(e))

    def var_prediction(self, no_of_prediction):
        tdata = self.data.drop([self.dateTimeFeature], axis=1)
        tdata.index = self.data[self.dateTimeFeature]
        lag_order = self.model_fit.k_ar
        predictions = self.model_fit.forecast(tdata.values[-lag_order:], steps=no_of_prediction)
        predictions = predictions.round(2)
        col = self.targetFeature.split(",")
        pred = pd.DataFrame(index=range(0, len(predictions)), columns=col)
        for j in range(0, len(col)):
            for i in range(0, len(predictions)):
                pred.iloc[i][j] = predictions[i][j]
        predictions = pred
        pred = self.invertTransformation(tdata, self.targetFeature, predictions, self.dictDiffCount)
        return pred

    def save_dl_model(self, smodel, scaler_model):
        try:
            saved_model = self.usecasename+'_'+self.version
            filename = os.path.join(self.deployLocation, 'model', saved_model)
            smodel.save(filename)
            if scaler_model != 'NA' and scaler_model != '':
                scaler_filename = os.path.join(self.deployLocation, 'model', saved_model+'_scaler.pkl')
                with open(scaler_filename, 'wb') as f:
                    pickle.dump(scaler_model, f)
            else:
                scaler_filename = 'NA'
            return filename, saved_model, scaler_filename
        except Exception as e:
            print(e)

    def save_model(self, smodel):
        try:
            saved_model = self.usecasename+'_'+self.version+'.sav'
            filename = os.path.join(self.deployLocation, 'model', saved_model)
            with open(filename, 'wb') as f:
                pickle.dump(smodel, f)
            return filename, saved_model
        except Exception as e:
            print(e)

    def mean_absolute_percentage_error(self, y_true, y_pred):
        try:
            y_true, y_pred = np.array(y_true), np.array(y_pred)
            # epsilon belongs in the denominator to guard against division by zero;
            # the original added it to the quotient due to operator precedence
            mape = np.mean(np.abs((y_true - y_pred) / (y_true + sys.float_info.epsilon))) * 100
            return mape
        except Exception as inst:
            self.log.info('------------- mean_absolute_percentage_error ---------------')
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))

    ## Fbprophet model
    def getfbprophetmodel(self, predicted_data_file, dataFolderLocation, tFeature):
        try:
            modelName = 'fbprophet'
            modelconfig = self.modelconfig['fbprophet']
            self.targetFeature = tFeature[0]
            X_Train = pd.DataFrame(self.data[self.targetFeature])
            try:
                ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
                try:
                    #for non utc timestamp
                    self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature], errors='coerce')
                except:
                    #for utc timestamp
                    self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature], errors='coerce', utc=True)
                self.data = self.data.dropna()
            except:
                pass
            aion_prophet_obj = aion_fbprophet(modelconfig, self.trainPercentage, self.data, self.targetFeature, self.dateTimeFeature)
            self.log.info('Status:- |... TimeSeries Algorithm applied: FBPROPHET')
            self.model_fit, mae, rmse_prophet, mse, mape, r2, pred_freq, additional_regressors, prophet_df_new = aion_prophet_obj.aion_probhet(X_Train, self.dateTimeFeature, predicted_data_file, dataFolderLocation)
            ## Added for additional scoring params
            if (self.scoreParam.lower() == "r2"):
                scoringparam_v = r2
                self.log.info("fbprophet User selected scoring parameter is r2. r2 value: "+str(r2))
            elif (self.scoreParam.lower() == "rmse"):
                scoringparam_v = rmse_prophet
                self.log.info("fbprophet User selected scoring parameter is RMSE. RMSE value: "+str(rmse_prophet))
            elif (self.scoreParam.lower() == "mse"):
                scoringparam_v = mse
                self.log.info("fbprophet User selected scoring parameter is MSE. MSE value: "+str(mse))
            elif (self.scoreParam.lower() == "mae"):
                scoringparam_v = mae
                self.log.info("fbprophet User selected scoring parameter is MAE. MAE value: "+str(mae))
            else:
                scoringparam_v = rmse_prophet
            self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v, 2)))  #task 11997 displaying user selected scoring parameter in status logs
            error_matrix = '"RMSE":"'+str(round(rmse_prophet, 2))+'","MAPE":"'+str(round(mape, 2))+'","R2":"'+str(round(r2, 2))+'","MAE":"'+str(round(mae, 2))+'","MSE":"'+str(round(mse, 2))+'"'
            self.log.info("fbprophet all scoring parameter results: "+str(error_matrix))
            scoredetails = '{"Model":"FBProphet ","Score":'+str(scoringparam_v)+',"Scoring Param": "'+str(self.scoreParam.lower())+'"}'
            self.selectedColumns = self.targetFeature+','+self.dateTimeFeature
            self.selectedColumns = self.selectedColumns.split(",")
            self.pred_freq = pred_freq
            self.additional_regressors = additional_regressors
            self.log.info('------------- End FBPROPHET Model -------------\n')
            return ('Success', modelName.upper(), self.scoreParam.lower(), scoringparam_v, self.model_fit, self.selectedColumns, error_matrix, scoredetails, self.dictDiffCount, self.pred_freq, self.additional_regressors, prophet_df_new)
        except Exception as e:
            self.log.info("FBProphet operation failed. error: "+str(e))
            return ('Error', modelName.upper(), self.scoreParam.lower(), 0, None, self.selectedColumns, '', '{}', self.dictDiffCount, self.pred_freq, self.additional_regressors, pd.DataFrame())

    ## Arima model
    def get_arima_values(self):
        try:
            tFeature = self.targetFeature.split(',')
            if (len(tFeature) == 1):
                model_name = 'arima'
            else:
                self.log.info("Note: ARIMA model is going to perform only on the first of the provided target features because the data did not meet the VAR model constraints")
                self.targetFeature = tFeature[0]
            sesonalityChecks = True
            stationaryChecks = False
            #start checking seasonality using ch test and ocsb
            self.log.info(self.data.head(5))
            res = pm.arima.nsdiffs(self.data[self.targetFeature], m=355, max_D=5, test="ch")  # 365 since daily
            self.log.info('-------> Seasonality checks: %f' % res)
            if res >= 4:
                self.log.info("-----------> Data is following Seasonality ")
                self.log.info('Status:- |... Seasonality Check Done. Data is following Seasonality ')
                sesonalityChecks = True
            else:
                self.log.info("-----------> Data is not following Seasonality ")
                self.log.info('Status:- |... Seasonality Check Done. Data is not following Seasonality')
                sesonalityChecks = False
            # end checking seasonality using ch test and ocsb
            # start checking stationary data for time series
            series = self.data[self.targetFeature]
            adf_test = pm.arima.ADFTest(alpha=0.05)
            resultSt = adfuller(self.data[self.targetFeature])
            self.log.info('ADF Statistic: %f' % resultSt[0])
            self.log.info('p-value: %f' % resultSt[1])
            if resultSt[1] <= 0.05:
                stationaryChecks = True
                self.log.info("the data does not have a unit root and is stationary.")
                self.log.info('Status:- |... Stationary Check Done. Data is stationary')
            else:
                stationaryChecks = False
                self.log.info("the data has a unit root and is non-stationary.")
                self.log.info('Status:- |... Stationary Check Done. Data is non-stationary')
            # End of stationary checks
            self.log.info('\n------------- Start Arima Model -------------')
            self.log.info('-------> Top 5 Rows: ')
            self.log.info(self.data.head(5))
            eion_arima_obj = eion_arima(self.modelconfig['arima'], self.trainPercentage, sesonalityChecks, stationaryChecks)
            return 'Success', eion_arima_obj
        except Exception as e:
            # was str(inst) in the original, but the exception is bound to e
            self.log.info('<!------------- Get ARIMA Values ---------------> '+str(e))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
            return 'Error', None

    def getEncDecLSTMMultVrtInUniVrtOut(self):
        try:
            self.log.info('Status:- |... TimeSeries Algorithm applied: Encoder Decoder LSTM')
            modelName = 'encoder_decoder_lstm_mvi_uvo'
            modelconfig = self.modelconfig['encoder_decoder_lstm_mvi_uvo']
            df = self.data
            targetFeature = list(self.targetFeature.split(","))
            try:
                ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
                try:
                    #for non utc timestamp
                    df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature], errors='coerce')
                except:
                    #for utc timestamp
                    df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature], errors='coerce', utc=True)
                df = df.dropna()
            except:
                pass
            df = df.groupby(self.dateTimeFeature).mean()
            df = df.reset_index()
            tdata = df.drop([self.dateTimeFeature], axis=1)
            tdata.index = df[self.dateTimeFeature]
            #tdata = tdata[tdata.columns[tdata.columns.isin(targetFeature)]]
            #selectedColumns = self.targetFeature+','+self.dateTimeFeature
            #selectedColumns = selectedColumns.split(",")
            selectedColumns = tdata.columns
            df_predicted = None
            aion_dlts_obj = tsDLMultiVrtInUniVrtOut(modelconfig, self.trainPercentage, targetFeature, self.dateTimeFeature)
            status, mse, rmse, r2, mae, model, df_predicted, lag_order, scaler = aion_dlts_obj.lstm_encdec_mvin_uvout(tdata)
            if status.lower() == 'success':
                ## Added for additional scoring params
                if (self.scoreParam.lower() == "r2"):
                    scoringparam_v = r2
                    self.log.info('Status:- |... Score R2(Avg) '+str(r2))
                elif (self.scoreParam.lower() == "rmse"):
                    scoringparam_v = rmse
                    self.log.info("Status:- |... Score RMSE(Avg) "+str(rmse))
                elif (self.scoreParam.lower() == "mse"):
                    scoringparam_v = mse
                    self.log.info("Status:- |... Score MSE(Avg) "+str(mse))
                elif (self.scoreParam.lower() == "mae"):
                    scoringparam_v = mae
                    self.log.info("Status:- |... Score MAE(Avg) : "+str(mae))
                else:
                    scoringparam_v = rmse
                error_matrix = '"RMSE":"'+str(round(rmse, 2))+'","MSE":"'+str(round(mse, 2))+'"'
                error_matrix = error_matrix+',"R2":"'+str(round(r2, 2))+'","MAE":"'+str(round(mae, 2))+'"'
                self.log.info("LSTM Multivariate Input Univariate Output all scoring param results: "+str(error_matrix))
                self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v, 2)))  #task 11997 displaying user selected scoring parameter in status logs
                scoredetails = '{"Model":"LSTM Multivariant","Score":'+str(scoringparam_v)+',"Scoring Param": "'+str(self.scoreParam.lower())+'"}'
            else:
                return 'Error', modelName.upper(), self.scoreParam.lower(), 'NA', None, selectedColumns, '', '{}', pd.DataFrame(), lag_order, None
        except Exception as e:
            self.log.info("getEncDecLSTMMultVrtInUniVrtOut method error. Error msg: "+str(e))
            return 'Error', modelName.upper(), self.scoreParam.lower(), 'NA', None, selectedColumns, '', '{}', pd.DataFrame(), lag_order, None
        return 'Success', modelName.upper(), self.scoreParam.lower(), scoringparam_v, model, selectedColumns, error_matrix, scoredetails, df_predicted, lag_order, scaler

    def getLSTMMultivariate(self):
        try:
            self.log.info('Status:- |... TimeSeries Algorithm applied: LSTM')
            modelName = 'lstm'
            modelconfig = self.modelconfig['lstm']
            df = self.data
            targetFeature = list(self.targetFeature.split(","))
            try:
                ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
                try:
                    #for non utc timestamp
                    df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature], errors='coerce')
                except:
                    #for utc timestamp
                    df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature], errors='coerce', utc=True)
                df = df.dropna()
            except:
                pass
            df = df.groupby(self.dateTimeFeature).mean()
            df = df.reset_index()
            tdata = df.drop([self.dateTimeFeature], axis=1)
            tdata.index = df[self.dateTimeFeature]
            tdata = tdata[tdata.columns[tdata.columns.isin(targetFeature)]]
            selectedColumns = self.targetFeature+','+self.dateTimeFeature
            selectedColumns = selectedColumns.split(",")
            df_predicted = None
            aion_dlts_obj = timeseriesDLMultivariate(modelconfig, self.trainPercentage, targetFeature, self.dateTimeFeature)
            status, mse, rmse, r2, mae, model, df_predicted, lag_order, scaler = aion_dlts_obj.lstm_multivariate(tdata)
            if status.lower() == 'success':
                ## Added for additional scoring params
                if (self.scoreParam.lower() == "r2"):
                    scoringparam_v = r2
                    self.log.info('Status:- |... Score R2(Avg) '+str(r2))
                elif (self.scoreParam.lower() == "rmse"):
                    scoringparam_v = rmse
                    self.log.info("Status:- |... Score RMSE(Avg) "+str(rmse))
                elif (self.scoreParam.lower() == "mse"):
                    scoringparam_v = mse
                    self.log.info("Status:- |... Score MSE(Avg) "+str(mse))
                elif (self.scoreParam.lower() == "mae"):
                    scoringparam_v = mae
                    self.log.info("Status:- |... Score MAE(Avg) : "+str(mae))
                else:
                    scoringparam_v = rmse
                error_matrix = '"RMSE":"'+str(round(rmse, 2))+'","MSE":"'+str(round(mse, 2))+'"'
                error_matrix = error_matrix+',"R2":"'+str(round(r2, 2))+'","MAE":"'+str(round(mae, 2))+'"'
                self.log.info("LSTM Multivariate all scoring param results: "+str(error_matrix))
                self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v, 2)))  #task 11997 displaying user selected scoring parameter in status logs
                scoredetails = '{"Model":"LSTM Multivariant","Score":'+str(scoringparam_v)+',"Scoring Param": "'+str(self.scoreParam.lower())+'"}'
            else:
                return 'Error', modelName.upper(), self.scoreParam.lower(), 'NA', None, selectedColumns, '', '{}', pd.DataFrame(), lag_order, None
        except Exception as e:
            self.log.info("getLSTMMultivariate method error. Error msg: "+str(e))
            return 'Error', modelName.upper(), self.scoreParam.lower(), 'NA', None, selectedColumns, '', '{}', pd.DataFrame(), lag_order, None
        return 'Success', modelName.upper(), self.scoreParam.lower(), scoringparam_v, model, selectedColumns, error_matrix, scoredetails, df_predicted, lag_order, scaler

    def getUniVarientLSTMModel(self):
        try:
            self.log.info('Status:- |... TimeSeries Algorithm applied: LSTM')
            modelName = 'lstm'
            lstmconfig = self.modelconfig['lstm']
            df = self.data
            try:
                ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
                try:
                    #for non utc timestamp
                    df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature], errors='coerce')
                except:
                    #for utc timestamp
                    df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature], errors='coerce', utc=True)
                df = df.dropna()
            except:
                pass
            tdata = df.drop([self.dateTimeFeature], axis=1)
            tdata.index = df[self.dateTimeFeature]
            tdata = pd.DataFrame(tdata[self.targetFeature])
            selectedColumns = self.targetFeature+','+self.dateTimeFeature
            selectedColumns = selectedColumns.split(",")
            aion_dlts_obj = timeseriesDLUnivariate(lstmconfig, self.trainPercentage, self.targetFeature, self.dateTimeFeature, modelName)
            status, lstm_mse, lstm_rmse, r2, mae, lstm_model, df_predicted_lstm, lag_order, scaler = aion_dlts_obj.ts_lstm(tdata)
            if status.lower() == 'success':
                ## Added for additional scoring params
                if (self.scoreParam.lower() == "r2"):
                    scoringparam_v = r2
                    self.log.info("LSTM Univariate User selected scoring parameter is r2. r2 value: "+str(r2))
                elif (self.scoreParam.lower() == "rmse"):
                    scoringparam_v = lstm_rmse
                    self.log.info("LSTM Univariate User selected scoring parameter is RMSE. RMSE value: "+str(lstm_rmse))
                elif (self.scoreParam.lower() == "mse"):
                    scoringparam_v = lstm_mse
                    self.log.info("LSTM Univariate User selected scoring parameter is MSE. MSE value: "+str(lstm_mse))
                elif (self.scoreParam.lower() == "mae"):
                    scoringparam_v = mae
                    self.log.info("LSTM Univariate User selected scoring parameter is MAE. MAE value: "+str(mae))
                else:
                    scoringparam_v = lstm_rmse
                error_matrix = '"RMSE":"'+str(round(lstm_rmse, 2))+'","MSE":"'+str(round(lstm_mse, 2))+'"'
                error_matrix = error_matrix+',"R2":"'+str(round(r2, 2))+'","MAE":"'+str(round(mae, 2))+'"'
                self.log.info("LSTM Univariate, all scoring param results: "+str(error_matrix))
                self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v, 2)))  #task 11997 displaying user selected scoring parameter in status logs
                scoredetails = '{"Model":"LSTM Univariant","Score":'+str(scoringparam_v)+',"Scoring Param": "'+str(self.scoreParam.lower())+'"}'
                return 'Success', modelName.upper(), self.scoreParam.lower(), scoringparam_v, lstm_model, selectedColumns, error_matrix, scoredetails, df_predicted_lstm, lag_order, scaler
            else:
                return 'Error', modelName.upper(), self.scoreParam.lower(), 0, None, selectedColumns, '', '{}', pd.DataFrame(), 0, None
        except Exception as inst:
            self.log.info('<!------------- LSTM Error ---------------> '+str(inst))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
            return 'Error', modelName.upper(), self.scoreParam.lower(), 0, None, selectedColumns, '', '{}', pd.DataFrame(), 0, None

    def getUniVarientMLPModel(self):
        try:
            self.log.info('Status:- |... TimeSeries Algorithm applied: MLP')
            modelName = 'mlp'
            lstmconfig = self.modelconfig['mlp']
            df = self.data
            try:
                ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
                try:
                    #for non utc timestamp
                    df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature], errors='coerce')
                except:
                    #for utc timestamp
                    df[self.dateTimeFeature] = pd.to_datetime(df[self.dateTimeFeature], errors='coerce', utc=True)
                df = df.dropna()
            except:
                pass
            tdata = df.drop([self.dateTimeFeature], axis=1)
            tdata.index = df[self.dateTimeFeature]
            tdata = pd.DataFrame(tdata[self.targetFeature])
            selectedColumns = self.targetFeature+','+self.dateTimeFeature
            selectedColumns = selectedColumns.split(",")
            aion_dlts_obj = timeseriesDLUnivariate(lstmconfig, self.trainPercentage, self.targetFeature, self.dateTimeFeature, modelName)
            mlp_mse, mlp_rmse, r2, mae, mlp_model, df_predicted_mlp, look_back, scaler = aion_dlts_obj.mlpDL(tdata)
            ## Added for additional scoring params
            if (self.scoreParam.lower() == "r2"):
                scoringparam_v = r2
                self.log.info("MLP Univariate User selected scoring parameter is R2. R2 value: "+str(r2))
            elif (self.scoreParam.lower() == "rmse"):
                scoringparam_v = mlp_rmse
                self.log.info("MLP Univariate User selected scoring parameter is RMSE. RMSE value: "+str(mlp_rmse))
            elif (self.scoreParam.lower() == "mse"):
                scoringparam_v = mlp_mse
                self.log.info("MLP Univariate User selected scoring parameter is MSE. MSE value: "+str(mlp_mse))
            elif (self.scoreParam.lower() == "mae"):
                scoringparam_v = mae
                self.log.info("MLP Univariate User selected scoring parameter is MAE. MAE value: "+str(mae))
            else:
                scoringparam_v = mlp_rmse
            error_matrix = '"RMSE":"'+str(round(mlp_rmse, 2))+'","MSE":"'+str(round(mlp_mse, 2))+'"'
            error_matrix = error_matrix+',"R2":"'+str(round(r2, 2))+'","MAE":"'+str(round(mae, 2))+'"'
            self.log.info("MLP Univariate, all scoring param results: "+str(error_matrix))
            self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v, 2)))  #task 11997 displaying user selected scoring parameter in status logs
            scoredetails = '{"Model":"MLP","Score":'+str(scoringparam_v)+',"Scoring Param": "'+str(self.scoreParam.lower())+'"}'
            return 'Success', modelName.upper(), self.scoreParam.lower(), scoringparam_v, mlp_model, selectedColumns, error_matrix, scoredetails, df_predicted_mlp, look_back, scaler
        except Exception as inst:
            import traceback
            self.log.info("MLP Error in timeseries module: \n"+str(traceback.print_exc()))
            self.log.info('<!------------- MLP Error ---------------> ')
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
            return 'Error', modelName.upper(), self.scoreParam.lower(), 0, None, selectedColumns, '', '{}', pd.DataFrame(), 0, None

    def getARIMAmodel(self, predicted_data_file):
        try:
            modelName = 'arima'
            status, eion_arima_obj = self.get_arima_values()
            self.log.info('Status:- |... TimeSeries Algorithm applied: ARIMA')
            selected_feature_list = self.data[self.targetFeature].values
            selected_feature_list = selected_feature_list.astype('int32')
            self.log.info('-------> Target Feature First 5 Rows: ')
            self.log.info(self.data[self.targetFeature].head(5))
            X_Train = pd.DataFrame(self.data[self.targetFeature])
            try:
                ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
                try:
                    #for non utc timestamp
                    self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature], errors='coerce')
                except:
                    #for utc timestamp
                    self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature], errors='coerce', utc=True)
                self.data = self.data.dropna()
            except:
                pass
            if status.lower() == 'success':
                self.model_fit, mae, rmse_arima, mse, r2, aic_score, mape, valid, pred = eion_arima_obj.eion_arima(X_Train)
                ## Added for additional scoring params
                if (self.scoreParam.lower() == "r2"):
                    scoringparam_v = r2
                    self.log.info("ARIMA Univariate User selected scoring parameter is r2. r2 value: "+str(r2))
                elif (self.scoreParam.lower() == "rmse"):
                    scoringparam_v = rmse_arima
                    self.log.info("ARIMA Univariate User selected scoring parameter is RMSE. RMSE value: "+str(rmse_arima))
                elif (self.scoreParam.lower() == "mse"):
                    scoringparam_v = mse
                    self.log.info("ARIMA Univariate User selected scoring parameter is MSE. MSE value: "+str(mse))
                elif (self.scoreParam.lower() == "mae"):
                    scoringparam_v = mae
                    # fixed: the original logged the RMSE value here instead of the MAE
                    self.log.info("ARIMA Univariate User selected scoring parameter is MAE. MAE value: "+str(mae))
                else:
                    scoringparam_v = rmse_arima
                error_matrix = '"RMSE":"'+str(round(rmse_arima, 2))+'","MSE":"'+str(round(mse, 2))+'"'
                error_matrix = error_matrix+',"R2":"'+str(round(r2, 2))+'","MAE":"'+str(round(mae, 2))+'"'
                self.log.info('Status:- |... Score '+self.scoreParam.capitalize()+': '+str(round(scoringparam_v, 2)))  #task 11997 displaying user selected scoring parameter in status logs
                self.log.info("ARIMA all scoring param results: "+str(error_matrix))
                scoredetails = '{"Model":"ARIMA","Score":'+str(scoringparam_v)+',"Scoring Param": "'+str(self.scoreParam.lower())+'"}'
                df_pred = pd.DataFrame()
                df_pred[self.targetFeature+'_actual'] = valid[self.targetFeature]
                df_pred[self.targetFeature+'_pred'] = pred
                self.log.info("ARIMA AIC Score: "+str(round(aic_score, 2)))
                selectedColumns = self.targetFeature+','+self.dateTimeFeature
                selectedColumns = selectedColumns.split(",")
                self.log.info('------------- End ARIMA Model -------------\n')
                return ('Success', modelName.upper(), self.scoreParam.lower(), scoringparam_v, self.model_fit, selectedColumns, error_matrix, scoredetails, self.dictDiffCount, self.pred_freq, self.additional_regressors, rmse_arima, df_pred)
            else:
                return 'Error', modelName.upper(), self.scoreParam.lower(), 0, None, selectedColumns, '', '{}', self.dictDiffCount, self.pred_freq, self.additional_regressors, 0, pd.DataFrame()
        except Exception as inst:
            self.log.info('<!------------- Arima Error ---------------> ')
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
            return 'Error', modelName.upper(), self.scoreParam.lower(), 0, None, selectedColumns, '', '{}', self.dictDiffCount, self.pred_freq, self.additional_regressors, 0, pd.DataFrame()

    ## VAR model fn
    def getVARmodel(self):
        from sklearn.metrics import r2_score
        modelName = "var"
        if modelName.lower() == 'var':
            try:
                self.log.info('-------> Top 5 Rows: ')
                self.log.info(self.data.head(5))
                if (self.targetFeature != ''):
                    self.selectedColumns = self.targetFeature
                    self.selectedColumns = self.selectedColumns+','+self.dateTimeFeature
                    self.selectedColumns = self.selectedColumns.split(",")
                    self.data = self.data[self.selectedColumns]
                try:
                    ##For bug 13513 - If the datetime needs UTC timestamp process, except part will handle.
                    try:
                        #for non utc timestamp
                        self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature], errors='coerce')
                    except:
                        #for utc timestamp
                        self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature], errors='coerce', utc=True)
                    self.data = self.data.dropna()
                except:
                    pass
                self.data = self.data.groupby(self.dateTimeFeature).mean()
                self.data = self.data.reset_index()
                tdata = self.data.drop([self.dateTimeFeature], axis=1)
                tdata.index = self.data[self.dateTimeFeature]
                cols = tdata.columns
                self.log.info('------------- Start VAR Model -------------')
                size = int(len(tdata) * (self.trainPercentage/100))
                train = tdata.iloc[0:(int(size))]
                valid = tdata.iloc[(int(size)):(int(len(tdata))-1)]
                start = time.time()
                model = VAR(endog=train)
                self.log.info('Status:- |... TimeSeries Algorithm applied: VAR')
                self.log.info("\n-------------Selecting max lag order -----------")
                modelFitted = model.fit()
                lag_order = modelFitted.k_ar
                self.log.info('------->lag_order: '+str(lag_order))
                executionTime = time.time() - start
                self.log.info('-------> Time: '+str(executionTime)+'\n')
                prediction = modelFitted.forecast(train.values[-lag_order:], steps=len(valid))
                pred = pd.DataFrame(index=range(0, len(prediction)), columns=tdata.columns)
                for j in range(0, (len(tdata.columns))):
                    for i in range(0, len(prediction)):
                        pred.iloc[i][j] = prediction[i][j]
                self.log.info("\n--------------- Model Validation Start ---------------")
                df_pred = pd.DataFrame()
                #check rmse
                error_matrix = '"FeaturesMatrix":['
                valid = valid.reset_index()
                var_rmse = 0
                mse_dict = {}
                rmse_dict = {}
                mae_dict = {}
                r2_dict = {}
                for i in tdata.columns:
                    if (error_matrix != '"FeaturesMatrix":['):
                        error_matrix = error_matrix+','
                    df_pred[i+'_actual'] = valid[i]
                    df_pred[i+'_pred'] = pred[i]
                    rmse_var = sqrt(mean_squared_error(valid[i], pred[i]))
                    mse = mean_squared_error(valid[i], pred[i])
                    mae = mean_absolute_error(valid[i], pred[i])
                    mape = self.mean_absolute_percentage_error(valid[i], pred[i])
                    r2 = r2_score(valid[i], pred[i])
                    mse_dict[i] = mse
                    rmse_dict[i] = rmse_var
                    r2_dict[i] = r2
                    mae_dict[i] = mae
                    var_rmse += round(rmse_var, 2)
                    error_matrix += '{"Features":"'+i+'","MSE":"'+str(round(mse, 2))+'","MAPE":"'+str(round(mape, 2))+'","RMSE":"'+str(round(rmse_var, 2))+'","MAE":"'+str(round(mae, 2))+'"}'
                    self.log.info("------->Feature: "+str(i))
                    self.log.info("---------->MAE: "+str(mae))
                    self.log.info("---------->MSE: "+str(mse))
                    self.log.info("---------->RMSE :"+str(rmse_var))
                    self.log.info("------------->MAPE :"+str(mape))
                    self.log.info("------------->R2 :"+str(r2))
                var_rmse = var_rmse/len(tdata.columns)
                self.log.info("--------------- Model Validation End ---------------\n")
                pred = self.invertTransformation(train, self.targetFeature, pred, self.dictDiffCount)
                self.log.info("-------> Predictions on Test Data")
                self.log.info(pred.head(4))
                model = VAR(endog=tdata)
                self.model_fit = model.fit()
                aic_score = self.model_fit.aic
                lag_order = self.model_fit.k_ar
                self.log.info('------->AIC Score: '+str(aic_score))
                error_matrix += ']'
                scoredetails = '{"Model":"VAR","Score":'+str(aic_score)+'}'
                self.log.info('Status:- |... Score AIC: '+str(round(aic_score, 2)))
                tsModelTestObj = timeseriesModelTests(self.data1, self.targetFeature, self.dateTimeFeature, 0)
                lenTargetFeature = len(self.targetFeature)
                r2 = list(r2_dict.values())[-1]
                rmse = list(rmse_dict.values())[-1]
                mse = list(mse_dict.values())[-1]
                mae = list(mae_dict.values())[-1]
                ## Added for additional scoring params
                if (self.scoreParam.lower() == "r2"):
                    scoringparam_v = r2
                    self.log.info("VAR User selected scoring parameter is r2. r2 value: "+str(r2))
                    self.log.info('Status:- |... Score R2(Avg): '+str(round(r2, 2)))
                elif (self.scoreParam.lower() == "rmse"):
                    scoringparam_v = rmse
                    self.log.info("VAR User selected scoring parameter is RMSE. RMSE value: "+str(rmse))
                    self.log.info('Status:- |... Score RMSE(Avg): '+str(round(rmse, 2)))
                elif (self.scoreParam.lower() == "mse"):
                    scoringparam_v = mse
                    self.log.info("VAR User selected scoring parameter is MSE. MSE value: "+str(mse))
                    self.log.info('Status:- |... Score MSE(Avg): '+str(round(mse, 2)))
                elif (self.scoreParam.lower() == "mae"):
                    scoringparam_v = mae
                    self.log.info("VAR User selected scoring parameter is MAE. MAE value: "+str(mae))
                    self.log.info('Status:- |... Score MAE(Avg): '+str(round(mae, 2)))
                else:
                    scoringparam_v = rmse
                    self.log.info('Status:- |... Score RMSE(Avg): '+str(round(rmse, 2)))
                error_matrix = '"RMSE":"'+str(round(rmse, 2))+'","MSE":"'+str(round(mse, 2))+'"'
                error_matrix = error_matrix+',"R2":"'+str(round(r2, 2))+'","MAE":"'+str(round(mae, 2))+'"'
                self.log.info("VAR, all scoring param results: "+str(error_matrix))
                scoredetails = '{"Model":"VAR","Score":'+str(round(scoringparam_v, 2))+',"Scoring Param": "'+str(self.scoreParam)+'"}'
                self.log.info('------------- VAR Model End -------------\n')
                #countDependantFeature=0
                out = durbin_watson(tdata)
                for col, val in zip(tdata.columns, out):
                    self.log.info(col+':'+str(round(val, 2)))
                    if val > 0.0 and val < 1.5:
                        self.log.info("There is a positive correlation")
                    elif val >= 1.5 and val <= 2.5:
                        self.log.info("Relationship is Normal ")
                    else:
                        self.log.info("There is a negative correlation")
                return ('Success', modelName.upper(), self.scoreParam.lower(), round(scoringparam_v, 2), self.model_fit, self.selectedColumns, error_matrix, scoredetails, df_pred, self.dictDiffCount, self.pred_freq, self.additional_regressors, lag_order)
            except Exception as inst:
                self.log.info('<!------------- Var model Error ---------------> ')
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
                self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
                return 'Error', modelName.upper(), self.scoreParam.lower(), 0, None, self.selectedColumns, '', '{}', pd.DataFrame(), self.dictDiffCount, self.pred_freq, self.additional_regressors, 0

    ## Get best time series algorithm among selected algs
    def getbestmodel(self, rmse_prophet, rmse_arima, rmse_lstm, rmse_mlp, rmse_var):
        best_model = ''
        ## For R2 score (higher is better); the parameters hold R2 values in this case
        if (self.scoreParam.lower() == 'r2'):
            modelScore = []
            modelScore.append(rmse_prophet)
            modelScore.append(rmse_arima)
            modelScore.append(rmse_lstm)
            modelScore.append(rmse_mlp)
            modelScore.append(rmse_var)
            if (max(modelScore) == rmse_arima):
                best_model = 'arima'
                self.log.info('Status:- |... TimeSeries Best Algorithm: ARIMA')
                return best_model
            elif (max(modelScore) == rmse_prophet):
                best_model = 'fbprophet'
                self.log.info('Status:- |... TimeSeries Best Algorithm: FBPROPHET')
                return best_model
            elif (max(modelScore) == rmse_lstm):
                best_model = 'lstm'
                self.log.info('Status:- |... TimeSeries Best Algorithm: LSTM')
                return best_model
            elif (max(modelScore) == rmse_mlp):
                best_model = 'mlp'
                self.log.info('Status:- |... TimeSeries Best Algorithm: MLP')
                return best_model
            elif (max(modelScore) == rmse_var):
                best_model = 'var'
                self.log.info('Status:- |... TimeSeries Best Algorithm: VAR')
                return best_model
            else:
                ## Scores are equal, so the models are performing equally;
                ## selecting arima as the best one.
                best_model = 'arima'
                return best_model
        else:
            ## Error metrics (lower is better); 0xFFFF marks unavailable/failed models
            modelScore = []
            modelScore.append(rmse_prophet)
            modelScore.append(rmse_arima)
            modelScore.append(rmse_lstm)
            modelScore.append(rmse_mlp)
            modelScore.append(rmse_var)
            if (min(modelScore) == rmse_arima and rmse_arima != 0xFFFF):
                best_model = 'arima'
                self.log.info('Status:- |... TimeSeries Best Algorithm: ARIMA')
                return best_model
            elif (min(modelScore) == rmse_prophet and rmse_prophet != 0xFFFF):
                best_model = 'fbprophet'
                self.log.info('Status:- |... TimeSeries Best Algorithm: FBPROPHET')
                return best_model
            elif (min(modelScore) == rmse_lstm and rmse_lstm != 0xFFFF):
                best_model = 'lstm'
                self.log.info('Status:- |... TimeSeries Best Algorithm: LSTM')
                return best_model
            elif (min(modelScore) == rmse_mlp and rmse_mlp != 0xFFFF):
                best_model = 'mlp'
                self.log.info('Status:- |... TimeSeries Best Algorithm: MLP')
                return best_model
            elif (min(modelScore) == rmse_var and rmse_var != 0xFFFF):
                best_model = 'var'
                self.log.info('Status:- |... TimeSeries Best Algorithm: VAR')
                return best_model
            else:
                ## Scores are equal, so the models are performing equally;
                ## selecting arima as the best one.
                best_model = 'arima'
                return best_model

    ## Selecting best model algorithm
    def bestmodelProcess(self, modelNames, nfeatures, trained_data_file, tFeature, predicted_data_file, dataFolderLocation):
        try:
            best_model = ''
            lag_order = 1
            predict_var = None
            predict_arima = None
            predict_lstm = None
            predict_mlp = None
            predict_fbprophet = None
            modelNames = [x.lower() for x in modelNames]
            inputFeature_len = nfeatures
            status = 'Success'
            if 'fbprophet' in modelNames:
                status, modelName_prophet, fbprophet, rmse_prophet, fp_model_fit, selectedColumns_prophet, error_matrix_prophet, scoredetails_prophet, dictDiffCount_prophet, pred_freq_prophet, additional_regressors_prophet, predict_fbprophet = self.getfbprophetmodel(predicted_data_file, dataFolderLocation, tFeature)
                if status.lower() == 'error':
                    self.log.info('-------------> FBPROPHET RMSE Score: Error')
                    if (self.scoreParam.lower() == 'r2'):
                        rmse_prophet = -0xFFFF
                    else:
                        rmse_prophet = 0xFFFF
                else:
                    self.log.info("-------------> FBPROPHET RMSE Score:\t"+str(round(rmse_prophet, 2)))
            else:
                if (self.scoreParam.lower() == 'r2'):
                    rmse_prophet = -0xFFFF
                else:
                    rmse_prophet = 0xFFFF
            if 'arima' in modelNames:
                status, modelName, aic, rmse_arima, ar_model_fit, selectedColumns, error_matrix, scoredetails, dictDiffCount, pred_freq, additional_regressors, rmse_arima_act, predict_arima = self.getARIMAmodel(predicted_data_file)
                if status.lower() == 'error':
                    self.log.info('-------------> ARIMA RMSE Score: Error')
                    if (self.scoreParam.lower() == 'r2'):
                        rmse_arima = -0xFFFF
                    else:
                        rmse_arima = 0xFFFF
                else:
                    self.log.info('-------------> ARIMA RMSE Score:\t'+str(round(rmse_arima, 2)))
            else:
                if (self.scoreParam.lower() == 'r2'):
                    rmse_arima = -0xFFFF  ## -65535
                else:
                    rmse_arima = 0xFFFF
            if 'lstm' in modelNames:
                if inputFeature_len == 1:
                    status, modelName_lstm, score_type, rmse_lstm, lstm_model_fit, lstm_selectedColumns, error_matrix_lstm, scoredetails_lstm, predict_lstm, lag_order, lstm_scaler = self.getUniVarientLSTMModel()
                else:
                    status, modelName_lstm, score_type, rmse_lstm, lstm_model_fit, lstm_selectedColumns, error_matrix_lstm, scoredetails_lstm, predict_lstm, lag_order, lstm_scaler = self.getLSTMMultivariate()
                if status.lower() == 'error':
                    self.log.info('-------------> LSTM RMSE Score: Error')
                    if (self.scoreParam.lower() == 'r2'):
                        rmse_lstm = -0xFFFF
                    else:
                        rmse_lstm = 0xFFFF
                else:
                    self.log.info('-------------> LSTM RMSE Score:\t'+str(round(rmse_lstm, 2)))
            else:
                if (self.scoreParam.lower() == 'r2'):
                    rmse_lstm = -0xFFFF
                else:
                    rmse_lstm = 0xFFFF
            if 'mlp' in modelNames:
                status, modelName_mlp, score_type, rmse_mlp, mlp_model_fit, mlp_selectedColumns, error_matrix_mlp, scoredetails_mlp, predict_mlp, lag_order, mlp_scaler = self.getUniVarientMLPModel()
                if status.lower() == 'error':
                    self.log.info('-------------> MLP Score: Error')
                    if (self.scoreParam.lower() == 'r2'):
                        rmse_mlp = -0xFFFF
                    else:
                        rmse_mlp = 0xFFFF
                else:
                    self.log.info('-------------> MLP RMSE Score:\t'+str(round(rmse_mlp, 2)))
            else:
                if (self.scoreParam.lower() == 'r2'):
                    rmse_mlp = -0xFFFF
                else:
                    rmse_mlp = 0xFFFF
            if 'var' in modelNames:
                status, modelName_var, score_var_type, rmse_var, var_model, var_selectedColumns, error_matrix_var, scoredetails_var, predict_var, dictDiffCount, pred_freq, additional_regressors, lag_order = self.getVARmodel()
                if status.lower() == 'error':
                    self.log.info('-------------> VAR Score: Error')
                    if (self.scoreParam.lower() == 'r2'):
                        rmse_var = -0xFFFF
                    else:
                        rmse_var = 0xFFFF
            else:
                if (self.scoreParam.lower() == 'r2'):
                    rmse_var = -0xFFFF
                else:
                    rmse_var = 0xFFFF
            best_model = self.getbestmodel(rmse_prophet, rmse_arima, rmse_lstm, rmse_mlp, rmse_var)
            if (best_model.lower() == 'arima'):
                self.log.info('Best model is ARIMA based on metric '+str(self.scoreParam.lower()))
                predict_arima.to_csv(predicted_data_file)
                filename, saved_model = self.save_model(ar_model_fit)
                return best_model, modelName, aic, rmse_arima, ar_model_fit, selectedColumns, error_matrix, scoredetails, dictDiffCount, pred_freq, additional_regressors, filename, saved_model, lag_order, 'NA'
            elif (best_model.lower() == 'fbprophet'):
                self.log.info('Best model is fbprophet based on metric '+str(self.scoreParam.lower()))
                predict_fbprophet.to_csv(predicted_data_file)
                filename, saved_model = self.save_model(fp_model_fit)
                return best_model, modelName_prophet, fbprophet, rmse_prophet, fp_model_fit, selectedColumns_prophet, error_matrix_prophet, scoredetails_prophet, dictDiffCount_prophet, pred_freq_prophet, additional_regressors_prophet, filename, saved_model, lag_order, 'NA'
            elif (best_model.lower() == 'var'):
                self.log.info('Best model is VAR based on metric '+str(self.scoreParam.lower()))
                self.data.to_csv(trained_data_file)
                predict_var.to_csv(predicted_data_file)
                filename, saved_model = self.save_model(var_model)
                return best_model, modelName_var, score_var_type, rmse_var, var_model, var_selectedColumns, error_matrix_var, scoredetails_var, dictDiffCount, pred_freq, additional_regressors, filename, saved_model, lag_order, 'NA'
            elif (best_model.lower() == 'lstm'):
                self.log.info('Best model is LSTM based on metric '+str(self.scoreParam.lower()))
                predict_lstm.to_csv(predicted_data_file)
                filename, saved_model, scaler_model = self.save_dl_model(lstm_model_fit, lstm_scaler)
                return best_model, modelName_lstm, score_type, rmse_lstm, lstm_model_fit, lstm_selectedColumns, error_matrix_lstm, scoredetails_lstm, self.dictDiffCount, self.pred_freq, self.additional_regressors, filename, saved_model, lag_order, scaler_model
            elif (best_model.lower() == 'mlp'):
                self.log.info('Best model is MLP based on metric '+str(self.scoreParam.lower()))
                predict_mlp.to_csv(predicted_data_file)
                filename, saved_model, scaler_model = self.save_dl_model(mlp_model_fit, mlp_scaler)
                return best_model, modelName_mlp, score_type, rmse_mlp, mlp_model_fit, mlp_selectedColumns, error_matrix_mlp, scoredetails_mlp, self.dictDiffCount, self.pred_freq, self.additional_regressors, filename, saved_model, lag_order, scaler_model
            else:
                pass
        except Exception as e:
            self.log.info('Issue in running multi time series algorithm selection process. Please check the config params')
            self.log.info('error: '+str(e))

    #Method to determine seasonality and stationarity in the input data features. (Task:12622,12623)
    def seasonality_stationarity_test(self):
        ##The below part tests stationarity and seasonality in the given time series data based on the statsmodels lib.
        #self.data,self.targetFeature,self.dateTimeFeature
        self.log.info("<-------------- Time series stationarity and seasonality test Started...---------------->\n")
        ts_sstest = tsStationarySeasonalityTest(self.data, self.deployLocation)
        ## Time series stationary check
        ## Currently the stationarity check method is set to Augmented Dickey-Fuller, but the kpss method is also implemented.
stationary_method='adfuller' if (isinstance(self.targetFeature,list)): target=self.targetFeature pass elif (isinstance(self.targetFeature,str)): target=list(self.targetFeature.split(',')) stats_model,n_lags,p_value,stationary_result,stationary_combined_res=ts_sstest.stationary_check(target,self.dateTimeFeature,stationary_method) ## Time series Seasonality check ##Seasonal model default set as additive seasonal_model="additive" df,decompose_result_mult,seasonality_result,seasonality_combined_res=ts_sstest.seasonal_check(target,self.dateTimeFeature,seasonal_model) self.log.info("<-------------- Time series stationarity and seasonality test completed.---------------->\n") return stationary_result,seasonality_result #Main timeseries function. def timeseries_learning(self,trained_data_file,predicted_data_file,dataFolderLocation): dataFolderLocation=dataFolderLocation lag_order = 1 # ##The below part is to test stationarity and sessonality in the given time series data based on statsmodels lib. stationary_result,seasonality_result=self.seasonality_stationarity_test() try : tFeature = self.targetFeature.split(',') lentFeature=len(tFeature) try: if lentFeature > 1: if any('timeseriesforecasting' in x.lower() for x in self.modelName): #task 11997 self.modelName.remove('timeseriesforecasting') if 'arima' in self.modelName: self.log.info('Status:- |... TimeSeries algorithm ARIMA not supported for multiple features') self.modelName.remove('arima') if 'fbprophet' in self.modelName: self.log.info('Status:- |... TimeSeries algorithm FBPROPHET not supported for multiple features') self.modelName.remove('fbprophet') if 'mlp' in self.modelName: self.log.info('Status:- |... TimeSeries algorithm MLP not supported for multiple features') self.modelName.remove('mlp') if len(self.modelName) == 0: self.log.info('--------> Default Set to VAR') self.modelName.append('var') if lentFeature == 1: if any('timeseriesforecasting' in x.lower() for x in self.modelName): #task 11997 self.modelName.remove('timeseriesforecasting') if 'var' in self.modelName: self.log.info('Status:- |... 
TimeSeries algorithm VAR not supported for single feature') self.modelName.remove('var') if len(self.modelName) == 0: self.log.info('--------> Default Set to ARIMA,FBProphet') self.modelName.append('arima') except Exception as e: self.log.info('input model name error: '+ str(e)) self.log.info("error in user selected model, may be wrong configuration, please check.") if (len(self.modelName) > 1): try: self.log.info('User selected models: '+str(self.modelName)) best_model,modelName,score_type,score,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,scaler_transformation = self.bestmodelProcess(self.modelName,lentFeature,trained_data_file,tFeature,predicted_data_file,dataFolderLocation) return best_model,modelName,score_type,score,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,scaler_transformation except Exception as e: self.log.info('multi model timeseries processing error '+str(e)) else: self.modelName = self.modelName[0] ## Normal arima ,var or fbprophet model call (user selects only one model at a time) if self.modelName.lower() == 'fbprophet': try: model_name='fbprophet' status,modelName,fbprophet,rmse_prophet,fp_model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,predict_output = self.getfbprophetmodel(predicted_data_file,dataFolderLocation,tFeature) if status.lower() == 'success': predict_output.to_csv(predicted_data_file) filename,saved_model = self.save_model(fp_model_fit) return 'self.modelName',modelName,fbprophet,rmse_prophet,fp_model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,'NA' else: raise Exception('Exception during model training') except Exception as e: self.log.info('fbprophet error....') self.log.info(e) elif self.modelName.lower() == 'encoder_decoder_lstm_mvi_uvo': try: status,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,predict_lstm,lag_order,lstm_scaler = self.getEncDecLSTMMultVrtInUniVrtOut() if status.lower() == 'success': predict_lstm.to_csv(predicted_data_file) filename,saved_model,scaler_model = self.save_dl_model(lstm_model_fit,lstm_scaler) return self.modelName,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,self.dictDiffCount,self.pred_freq,self.additional_regressors,filename,saved_model,lag_order,scaler_model else: raise Exception('Exception during model training') except Exception as inst: self.log.info('<!------------- LSTM Error ---------------> ') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) elif self.modelName.lower() == 'lstm': try: if lentFeature == 1: status,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,predict_lstm,lag_order,lstm_scaler = self.getUniVarientLSTMModel() else: status,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,predict_lstm,lag_order,lstm_scaler = self.getLSTMMultivariate() if status.lower() == 'success': predict_lstm.to_csv(predicted_data_file) filename,saved_model,scaler_model = self.save_dl_model(lstm_model_fit,lstm_scaler) return 
self.modelName,modelName_lstm,score_type,rmse_lstm,lstm_model_fit,lstm_selectedColumns,error_matrix_lstm,scoredetails_lstm,self.dictDiffCount,self.pred_freq,self.additional_regressors,filename,saved_model,lag_order,scaler_model else: raise Exception('Exception during model training') except Exception as inst: self.log.info('<!------------- LSTM Error ---------------> ') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) elif self.modelName.lower() == 'mlp': try: status,modelName_mlp,score_type,rmse_mlp,mlp_model_fit,mlp_selectedColumns,error_matrix_mlp,scoredetails_mlp,predict_mlp,lag_order,mlp_scaler = self.getUniVarientMLPModel() if status.lower() == 'success': predict_mlp.to_csv(predicted_data_file) filename,saved_model,scaler_model = self.save_dl_model(mlp_model_fit,mlp_scaler) return self.modelName,modelName_mlp,score_type,rmse_mlp,mlp_model_fit,mlp_selectedColumns,error_matrix_mlp,scoredetails_mlp,self.dictDiffCount,self.pred_freq,self.additional_regressors,filename,saved_model,lag_order,scaler_model else: raise Exception('Exception during model training') except Exception as inst: self.log.info('<!------------- MLP Error ---------------> ') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) else: #task 12627 time series profiler removed if lentFeature>1: self.modelName='var' self.data.to_csv(trained_data_file) else: self.modelName='arima' if self.modelName.lower()=='var': tsModelTestObj=timeseriesModelTests(self.data,self.targetFeature,self.dateTimeFeature,0) self.data,self.dictDiffCount=tsModelTestObj.StatinaryChecks(self.dictDiffCount) #self.log.info('Status:- |... Stationary Check Done.') gtestResults,countVariables=tsModelTestObj.grangersCausationMatrix(self.data,tFeature) if countVariables >= (lentFeature*lentFeature)-(lentFeature) or ((lentFeature*lentFeature)-(lentFeature))/2 : coIntegrationVectors=tsModelTestObj.coIntegrationTest(self.data) if coIntegrationVectors<=lentFeature: self.log.info("There are statistically significant relationship in data ") self.log.info('Status:- |... Statistically Check Done. Statistically significant relations') else: self.log.info("There are no statistically significant relationship in data") self.log.info('Status:- |... Statistically Check Done. 
No statistically significant relations') else: self.modelName='arima' if self.modelName.lower()=='var': try: self.log.info('ARIMA, FBProphet cannot apply, Input data contains more than one feature, only VAR algorithm can apply, applying VAR by AION \n') status,modelName,aic,aic_score,model_fit,selectedColumns,error_matrix,scoredetails,predict_var,dictDiffCount,pred_freq,additional_regressors,lag_order = self.getVARmodel() if status.lower() == 'success': filename,saved_model = self.save_model(model_fit) predict_var.to_csv(predicted_data_file) return self.modelName,modelName,aic,aic_score,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,'NA' else: raise Exception('Exception during VAR model training') except Exception as inst: self.log.info('<!------------- Var model Error ---------------> ') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) if self.modelName.lower() == 'arima': try: status,modelName,aic,scoringparam_v,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,rmse_arima_act,predict_output = self.getARIMAmodel(predicted_data_file) if status.lower() == 'success': predict_output.to_csv(predicted_data_file) filename,saved_model = self.save_model(model_fit) lag_order=0 return self.modelName,modelName,aic,scoringparam_v,model_fit,selectedColumns,error_matrix,scoredetails,dictDiffCount,pred_freq,additional_regressors,filename,saved_model,lag_order,'NA' else: raise Exception('Exception during model training') except Exception as inst: self.log.info('<!------------- Arima Error ---------------> '+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) except Exception as inst: self.log.info('<!------------- TimeSeries Learning Error ---------------> '+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def invertTransformation(self,Xtrain,targetFeature, preddf,dictDiffCount): try: dfforecast = preddf.copy() self.log.info(dfforecast.head(5)) columns =targetFeature.split(",") self.log.info(columns) self.log.info(dictDiffCount) for col in columns: if col in dictDiffCount: if dictDiffCount[col]==2: dfforecast[col] = (Xtrain[col].iloc[-1]-Xtrain[col].iloc[-2]) + dfforecast[col].cumsum() dfforecast[col] = Xtrain[col].iloc[-1] + dfforecast[col].cumsum() # Roll back 1st Diff return dfforecast except Exception as inst: self.log.info('<!------------- invertTransformation Error ---------------> '+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
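The module closes with invertTransformation, which rolls differenced forecasts back to the original scale by seeding cumulative sums with the last observed values. Below is a minimal standalone sketch of that roll-back; invert_differencing is a hypothetical helper that mirrors the cumsum logic above, one cumsum per level of differencing.

import pandas as pd

def invert_differencing(train_tail, forecast_diff, diff_count):
    # train_tail    : observed values of the original (undifferenced) series
    # forecast_diff : forecasts on the differenced scale
    # diff_count    : how many times the series was differenced (1 or 2)
    fc = forecast_diff.copy()
    if diff_count == 2:
        # Undo the 2nd difference: seed with the last observed 1st difference.
        fc = (train_tail.iloc[-1] - train_tail.iloc[-2]) + fc.cumsum()
    # Undo the 1st difference: seed with the last observed value.
    return train_tail.iloc[-1] + fc.cumsum()

train = pd.Series([10.0, 12.0, 15.0])
pred_diff = pd.Series([2.0, 1.0, 3.0])        # forecasts of y[t] - y[t-1]
print(invert_differencing(train, pred_diff, 1).tolist())   # [17.0, 18.0, 21.0]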
ts_arima_eion.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' # For timeseries pyramid pdaarima module from pmdarima.arima import auto_arima import pmdarima as pm import json #Python sklearn & std libraries import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.feature_selection import VarianceThreshold from sklearn.metrics import mean_absolute_error from sklearn.metrics import mean_squared_error #from sklearn.metrics import mean_absolute_percentage_error from sklearn.linear_model import LinearRegression from math import sqrt import warnings # For serialization. #from sklearn.externals import joblib import pickle import os,sys # For ploting (mathlab) import matplotlib.pyplot as plt #Import eion config manager module import logging from sklearn import metrics from sklearn.metrics import accuracy_score import time import os import sys # Eion arima module class eion_arima (): #Constructor def __init__(self,configfile,testpercentage,sesonalityChecks,stationaryChecks): # eaobj - eion arima class object try: tsarima_params = configfile self.testpercentage = testpercentage self.start_p= int(tsarima_params['start_p']) self.start_q= int(tsarima_params['start_q']) self.max_p= int(tsarima_params['max_p']) self.max_q= int(tsarima_params['max_q']) self.max_d= int(tsarima_params['max_d']) self.max_order= int(tsarima_params['max_order']) self.start_Q= int(tsarima_params['start_Q']) self.max_P= int(tsarima_params['max_P']) self.max_D= int(tsarima_params['max_D']) self.max_Q= int(tsarima_params['max_Q']) self.m= int(tsarima_params['m']) self.start_P= int(tsarima_params['start_P']) self.seasonal= tsarima_params['seasonal'] #self.seasonal= sesonalityChecks self.stationary=stationaryChecks #print("self.seasonal: \n",self.seasonal) #print("self.stationary: \n",self.stationary) if self.seasonal and not self.seasonal.isspace(): if (self.seasonal.lower() == 'true'): self.seasonal=True elif (self.seasonal.lower() == 'false'): self.seasonal=False else: self.seasonal=True else: self.seasonal=True self.d= int(tsarima_params['d']) self.D= int(tsarima_params['D']) #self.trace= tsarima_params['trace'] self.error_action= tsarima_params['error_action'] self.suppress_warnings= tsarima_params['suppress_warnings'] self.stepwise= tsarima_params['stepwise'] #self.random= tsarima_params['random'] self.log = logging.getLogger('eion') except Exception as inst: self.log.info('<!------------- Arima INIT Error ---------------> ') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def mean_absolute_percentage_error(self,y_true, y_pred): try: y_true, y_pred=np.array(y_true), np.array(y_pred) return np.mean(np.abs((y_true - y_pred) / y_true+sys.float_info.epsilon)) * 100 except Exception as inst: self.log.info('<------------- mean_absolute_percentage_error ---------------> ') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' 
'+str(fname)+' '+str(exc_tb.tb_lineno)) def eion_arima(self,train_data): try: start = time.time() auto_arima_stepwise_fit = pm.auto_arima(train_data, start_p=self.start_p, start_q=self.start_q,max_p=self.max_p, max_q=self.max_q,max_d=self.max_d,max_P=self.max_P,max_D=self.max_D,max_Q=self.max_Q,max_order=self.max_order, m=self.m,start_P=self.start_P,start_Q=self.start_Q, seasonal=self.seasonal,stationary=self.stationary,d=self.d, D=self.D,error_action=self.error_action,suppress_warnings=self.suppress_warnings,stepwise=self.stepwise) #auto_arima_stepwise_fit = pm.auto_arima(train_data, start_p=self.start_p, start_q=self.start_q,max_p=self.max_p, max_q=self.max_q,max_d=self.max_d,max_P=self.max_P,max_D=self.max_D,max_Q=self.max_Q,max_order=self.max_order, m=self.m,start_P=self.start_P,start_Q=self.start_Q, seasonal=True,stationary=True,d=self.d, D=self.D,error_action=self.error_action,suppress_warnings=self.suppress_warnings,random_state=20,stepwise=True) aic_score = auto_arima_stepwise_fit.aic() self.log.info('------->AIC Score: '+str(aic_score)) self.log.info('\n--------- Fit Summary --------------') self.log.info (auto_arima_stepwise_fit.summary()) self.log.info('--------- Fit Summary End--------------\n') self.log.info("\n--------------- Modal Validation Start ---------------") size = int(len(train_data) * (100 - self.testpercentage)/100) train = train_data.loc[0:size] valid = train_data.loc[size:len(train_data)] # valid_perc=((100-self.testpercentage)/100) # valid_perc=round(valid_perc, 1) # print("valid_perc: \n", valid_perc) self.log.info("------->Train Data Shape: "+str(train.shape)) self.log.info("------->Valid Data Shape"+str(valid.shape)) start1=len(train) end1=len(train_data) modelfit = auto_arima_stepwise_fit.fit(train) a_prediction = auto_arima_stepwise_fit.predict(valid.shape[0]) #a_prediction = auto_arima_stepwise_fit.predict(n_periods=len(valid)) #a_prediction = auto_arima_stepwise_fit.predict(start=start1,end=end1) #print("a_prediction: \n",a_prediction) #self.log.info(a_prediction) mae = metrics.mean_absolute_error(valid, a_prediction) self.log.info ("------->MAE: "+str(mae)) mape = self.mean_absolute_percentage_error(valid, a_prediction) #mape=np.mean(np.abs((valid - a_prediction) / valid)) * 100 self.log.info ("------->MAPE :"+str(mape)) #RMSE rmse = sqrt(mean_squared_error(valid,a_prediction)) mse = mean_squared_error(valid,a_prediction) self.log.info ("------->RMSE :"+str(rmse)) self.log.info ("------->MSE :"+str(mse)) from sklearn.metrics import r2_score r2 = r2_score(valid,a_prediction) ########### End #################### # now we have the model auto_arima_stepwise_fit.fit(train_data) self.log.info("------------- Validate Model End----------------\n") executionTime=time.time() - start self.log.info('-------> Time: '+str(executionTime)+'\n') return auto_arima_stepwise_fit,mae,rmse,mse,r2,aic_score,mape,valid,a_prediction except Exception as inst: self.log.info('<!------------- Arima Execute Error ---------------> '+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
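A minimal, hypothetical usage sketch of eion_arima: the config keys below are the ones read in __init__, but the values are illustrative, and the synthetic series stands in for real input data. Note that testpercentage=20 leaves 80% of rows for fitting in the internal validation split.

import numpy as np
import pandas as pd

# Illustrative values for the keys consumed by eion_arima.__init__
arima_config = {
    'start_p': 0, 'start_q': 0, 'max_p': 3, 'max_q': 3, 'max_d': 2,
    'max_order': 5, 'start_P': 0, 'start_Q': 0, 'max_P': 2, 'max_D': 1,
    'max_Q': 2, 'm': 1, 'seasonal': 'false', 'd': 1, 'D': 0,
    'error_action': 'ignore', 'suppress_warnings': True, 'stepwise': True,
}

# Synthetic trend-plus-noise series as a stand-in for real data
series = pd.Series(np.arange(120, dtype=float) + np.random.normal(scale=2.0, size=120))

ea = eion_arima(arima_config, testpercentage=20,
                sesonalityChecks='false', stationaryChecks=False)
fit, mae, rmse, mse, r2, aic, mape, valid, pred = ea.eion_arima(series)
print('AIC: %.2f  RMSE: %.2f  MAPE: %.2f' % (aic, rmse, mape))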
timeseriesDLMultivariate.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import pandas as pd import os import numpy as np import numpy import pandas from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense from tensorflow.keras.layers import LSTM from sklearn.preprocessing import MinMaxScaler import logging import tensorflow as tf from tensorflow.keras.layers import Dropout import math import tensorflow as tf import keras_tuner #from keras_tuner.engine.hyperparameters import HyperParameters from keras_tuner.tuners import RandomSearch,BayesianOptimization ,Hyperband from sklearn.model_selection import train_test_split from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import warnings warnings.simplefilter("ignore", UserWarning) class timeseriesDLMultivariate: def __init__(self,configfile,testpercentage,targetFeature,dateTimeFeature): self.look_back=None # self.df=df self.epochs=None self.batch_size=None self.hidden_layers=None self.optimizer=None self.activation_fn="relu" self.loss_fn=None self.first_layer=None self.dropout=None self.model_name=None self.dl_params = configfile # self.data=data self.targetFeature=targetFeature self.dateTimeFeature=dateTimeFeature self.testpercentage = float(testpercentage) self.log = logging.getLogger('eion') ##Added for ts hpt (TFSTask:7033) self.tuner_algorithm="" self.num_features=0 ##Get deep learning model hyperparameter from advanced config def getdlparams(self): val=self.dl_params self.log.info('-------> The given mlp/lstm timeseries algorithm parameters:>>') self.log.info(" "+str(val)) for k,v in val.items(): try: if (k == "tuner_algorithm"): self.tuner_algorithm=str(v) elif (k == "activation"): self.activation_fn=str(v) elif (k == "optimizer"): self.optimizer=str(v) elif (k == "loss"): self.loss_fn=str(v) elif (k == "first_layer"): if not isinstance(k,list): self.first_layer=str(v).split(',') else: self.first_layer=k elif (k == "lag_order"): if isinstance(k,list): k = ''.join(v) k=int(float(str(v))) else: self.look_back=int(float(str(v))) elif (k == "hidden_layers"): self.hidden_layers=int(v) elif (k == "dropout"): if not isinstance(k,list): self.dropout=str(v).split(',') else: self.dropout=k elif (k == "batch_size"): self.batch_size=int(v) elif (k == "epochs"): self.epochs=int(v) elif (k == "model_name"): self.model_name=str(v) except Exception as e: self.log.info('Exception occured in deeep learn param reading, setting up default params.') self.activation_fn="relu" self.optimizer="adam" self.loss_fn="mean_squared_error" self.first_layer=[8,512] self.hidden_layers=1 self.look_back=int(2) self.dropout=[0.1,0.5] self.batch_size=2 self.epochs=50 self.model_name="lstmmodel.h5" continue # Reshape the data to the required input shape of the LSTM model def create_dataset(self,X, y, n_steps): Xs, ys = [], [] for i in range(len(X) - n_steps): v = X.iloc[i:(i + n_steps)].values Xs.append(v) ys.append(y.iloc[i + n_steps]) return np.array(Xs), np.array(ys) ## Added function for hyperparam tuning (TFSTask:7033) def build_model(self,hp): n_features = 
len(self.targetFeature) try: loss=self.loss_fn optimizer=self.optimizer # self.getdlparams() try: if optimizer.lower() == "adam": optimizer=tensorflow.keras.optimizers.Adam elif(optimizer.lower() == "adadelta"): optimizer=tensorflow.keras.optimizers.experimental.Adadelta elif(optimizer.lower() == "nadam"): optimizer=tensorflow.keras.optimizers.experimental.Nadam elif(optimizer.lower() == "adagrad"): optimizer=tensorflow.keras.optimizers.experimental.Adagrad elif(optimizer.lower() == "adamax"): optimizer=tensorflow.keras.optimizers.experimental.Adamax elif(optimizer.lower() == "rmsprop"): optimizer=tensorflow.keras.optimizers.experimental.RMSprop elif(optimizer.lower() == "sgd"): optimizer=tensorflow.keras.optimizers.experimental.SGD else: optimizer=tensorflow.keras.optimizers.Adam except: optimizer=tf.keras.optimizers.Adam pass # look_back_min=int(self.look_back[0]) # look_back_max=int(self.look_back[1]) first_layer_min=round(int(self.first_layer[0])) first_layer_max=round(int(self.first_layer[1])) dropout_min=float(self.dropout[0]) dropout_max=float(self.dropout[1]) model=tf.keras.Sequential() try: model.add(LSTM(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_shape=(self.look_back,self.num_features))) except Exception as e: import traceback self.log.info("lstm build traceback: \n"+str(traceback.print_exc())) return model model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1))) model.add(Dense(units=n_features)) model.compile(optimizer=optimizer(hp.Choice('learning_rate',values=[1e-1,1e-2,1e-3,1e-4])),loss=loss,metrics=[self.loss_fn]) except Exception as e: self.log.info(",Hyperparam tuning build_model err msg: \n"+ str(e)) return model ##Multivariate lstm prediction function (lstm model, train, prediction, metrics) def lstm_multivariate(self,df): try: self.getdlparams() n_features = len(self.targetFeature) self.num_features=n_features try: if (type(self.targetFeature) is list): pass else: self.targetFeature = list(self.targetFeature.split(",")) except: pass df_new = df[df.columns[df.columns.isin(self.targetFeature)]] scaler=MinMaxScaler() df_transformed=scaler.fit_transform(df_new) ## For hyperparam tuning below part is added.only for getting best model and best hyperparameters train_size = int(len(df) * 0.80) train_data, test_data = train_test_split(df, test_size=0.2, shuffle=False) self.hpt_train=train_data time_steps=self.look_back ## Just for initialization before hyperparameter tuning. 
tuner_alg=self.tuner_algorithm #The below create_dataset only for getting best model and best hyperparameters X_train, y_train = self.create_dataset(train_data, train_data, time_steps) X_test, y_test = self.create_dataset(test_data, test_data, time_steps) self.log.info("Hyperparameter tuning algorithm is given by user (AION->Advanced configuration -> timeSeriesForecasting->LSTM): \n"+str(tuner_alg)) try: ## Remove untitled_project dir in AION root folder created by previous tuner search run import shutil shutil.rmtree(r".\untitled_project") except: pass try: if (tuner_alg.lower()=="randomsearch"): tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3) elif (tuner_alg.lower()=="bayesianoptimization"): tuner=BayesianOptimization(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3) elif (tuner_alg.lower()=="hyperband"): tuner=Hyperband(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_epochs=50,factor=3) else: self.log.info("The given alg is not implemented. Using default hyperparam tuning algorithm: RandomSearch.\n") tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3) from keras.callbacks import EarlyStopping stop_early = EarlyStopping(monitor='val_loss', patience=5) except Exception as e: self.log.info("The given alg have some issue, Using default hyperparam tuning algorithm: RandomSearch.\n") tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3) self.log.info("tuner errmsg:\n"+str(e)) #hpt search for best params try: tuner.search(X_train, y_train,validation_data=(X_test, y_test),callbacks=[stop_early]) except: tuner.search(x=X_train,y=y_train,validation_split=0.2,callbacks=[stop_early]) # best_model = tuner.get_best_models(num_models=1)[0] # self.log.info("best_model.summary(): \n"+str(best_model.summary())) best_hps=tuner.get_best_hyperparameters(num_trials=1)[0] self.log.info("TS Multivariate LSTM best hyperparameter values:\n"+str(best_hps.values)) self.log.info("Activation fn:\n"+str(self.activation_fn)) # time_steps_best=best_hps.get('time_steps') n_input=self.look_back best_hmodel=tuner.hypermodel.build(best_hps) optimizer=self.optimizer self.first_layer=best_hps.get('units') self.dropout=best_hps.get('Dropout_rate') learning_rate=float(best_hps.get('learning_rate')) try: ##TFSTask:7033, Added below try block for time series hyperparam tuning, here, for any optimizer, best learning_rate is provided from best_hps. 
try: if optimizer.lower() == "adam": optimizer=tensorflow.keras.optimizers.Adam(learning_rate=learning_rate) elif(optimizer.lower() == "adadelta"): optimizer=tensorflow.keras.optimizers.experimental.Adadelta(learning_rate=learning_rate) elif(optimizer.lower() == "nadam"): optimizer=tensorflow.keras.optimizers.experimental.Nadam(learning_rate=learning_rate) elif(optimizer.lower() == "adagrad"): optimizer=tensorflow.keras.optimizers.experimental.Adagrad(learning_rate=learning_rate) elif(optimizer.lower() == "adamax"): optimizer=tensorflow.keras.optimizers.experimental.Adamax(learning_rate=learning_rate) elif(optimizer.lower() == "rmsprop"): optimizer=tensorflow.keras.optimizers.experimental.RMSprop(learning_rate=learning_rate) elif(optimizer.lower() == "sgd"): optimizer=tensorflow.keras.optimizers.experimental.SGD(learning_rate=learning_rate) else: optimizer=tensorflow.keras.optimizers.Adam(learning_rate=learning_rate) except: optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate) pass ##From best hyperparameter values, now creating multivariate time series model using time generator. t_lb=1 test_size=t_lb+1 train,test = train_test_split(df_transformed,test_size=0.2,shuffle=False) generatorTrain=TimeseriesGenerator(df_transformed,df_transformed,length=n_input,batch_size=self.batch_size) # generatorTest=TimeseriesGenerator(test,test,length=n_input,batch_size=self.batch_size) batch_0=generatorTrain[0] x,y=batch_0 epochs=int(self.epochs) ##Multivariate LSTM model try: from tensorflow.keras.layers import Dropout model=Sequential() model.add(LSTM(self.first_layer,activation=self.activation_fn,input_shape=(n_input,n_features))) model.add(Dropout(self.dropout)) model.add(Dense(n_features)) model.compile(optimizer=self.optimizer,loss=self.loss_fn) #model.fit(generatorTrain,epochs=epochs,batch_size=self.batch_size,shuffle=False) model.fit_generator(generatorTrain, epochs=epochs,shuffle=False, verbose=0) # lstm_mv_testScore_mse = model.evaluate(x, y, verbose=0) except Exception as e: self.log.info("multivariate model build error: error msg:: \n"+str(e)) return 'Error',0,0,0,0,None,pd.DataFrame(),0,None #predictions = model.predict_generator(generatorTest) except Exception as e: self.log.info("multivariate model build error: error msg:: \n"+str(e)) return 'Error',0,0,0,0,None,pd.DataFrame(),0,None try: predictions=[] future_pred_len=n_input #To get values for prediction,taking look_back steps of rows first_batch=test[-future_pred_len:] c_batch = first_batch.reshape((1,future_pred_len,n_features)) current_pred=None for i in range(len(test)): #get pred for firstbatch current_pred=model.predict_generator(c_batch)[0] predictions.append(current_pred) #remove first val c_batch_rmv_first=c_batch[:,1:,:] #update c_batch=np.append(c_batch_rmv_first,[[current_pred]],axis=1) prediction_actual=scaler.inverse_transform(predictions) test_data_actual=scaler.inverse_transform(test) mse=None rmse=None ## Creating dataframe for actual,predictions try: pred_cols=list() for i in range(len(self.targetFeature)): pred_cols.append(self.targetFeature[i]+'_pred') predictions = pd.DataFrame(prediction_actual, columns=pred_cols) actual = pd.DataFrame(test_data_actual, columns=self.targetFeature) actual.columns = [str(col) + '_actual' for col in df.columns] df_predicted=pd.concat([actual,predictions],axis=1) self.log.info("LSTM Multivariate prediction dataframe: \n"+str(df_predicted)) from math import sqrt from sklearn.metrics import mean_squared_error from sklearn.metrics import r2_score from sklearn.metrics import 
mean_absolute_error target=self.targetFeature mse_dict={} rmse_dict={} mae_dict={} r2_dict={} lstm_var = 0 for name in target: index = df.columns.get_loc(name) mse = mean_squared_error(test_data_actual[:,index],prediction_actual[:,index]) mse_dict[name]=mse rmse=sqrt(mse) rmse_dict[name]=rmse lstm_var = lstm_var+rmse self.log.info("Name of the target feature: "+str(name)) self.log.info("RMSE of the target feature: "+str(rmse)) r2 = r2_score(test_data_actual[:,index],prediction_actual[:,index]) r2_dict[name]=r2 mae = mean_absolute_error(test_data_actual[:,index],prediction_actual[:,index]) mae_dict[name]=mae ## For VAR comparison, send last target mse and rmse from above dict lstm_var = lstm_var/len(target) select_msekey=list(mse_dict.keys())[-1] l_mse=list(mse_dict.values())[-1] select_rmsekey=list(rmse_dict.keys())[-1] l_rmse=list(rmse_dict.values())[-1] select_r2key=list(r2_dict.keys())[-1] l_r2=list(r2_dict.values())[-1] select_maekey=list(mae_dict.keys())[-1] l_mae=list(mae_dict.values())[-1] self.log.info("Selected target feature of LSTM for best model selection: "+str(select_rmsekey)) self.log.info("lstm rmse: "+str(l_rmse)) self.log.info("lstm mse: "+str(l_mse)) self.log.info("lstm r2: "+str(l_r2)) self.log.info("lstm mae: "+str(l_mae)) except Exception as e: import traceback print(" traceback error:\n",traceback.print_exc()) self.log.info("prediction error traceback: \n"+str(traceback.print_exc())) except Exception as e: self.log.info("dataframe creation error. err.msg: "+str(e)) return 'Error',0,0,0,0,None,pd.DataFrame(),0,None return 'Success',round(l_mse,2),round(l_rmse,2),round(l_r2,2),round(l_mae,2),model,df_predicted,n_input,scaler # import os #predicted_file_name='lstm_prediction_df.csv' #predicted_file_path=os.path.join(self.dataFolderLocation,predicted_file_name) #df_predicted.to_csv(predicted_file_path) ##save model #model_path = os.path.join(self.dataFolderLocation,self.model_name) #self.log.info("mlp model saved at: "+str(model_path)) #model.save(model_path) except Exception as e: ## Just use below traceback print to get detailed error information. # import traceback # print(" traceback error 7:\n",traceback.print_exc()) ## Enable traceback for debugging self.log.info("dataframe creation error. err.msg: "+str(e)) return 'Error',0,0,0,0,None,pd.DataFrame(),0,None
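The forecasting loop in lstm_multivariate is recursive: it seeds a batch with the last look_back rows of the test set, predicts one step, drops the oldest row, appends the prediction, and repeats. Below is a standalone sketch of that rolling-window pattern; recursive_forecast is a hypothetical helper, and model.predict is assumed here in place of the deprecated predict_generator used above.

import numpy as np

def recursive_forecast(model, seed_window, steps):
    # seed_window : (look_back, n_features) array of scaled values
    # steps       : number of future steps to forecast
    look_back, n_features = seed_window.shape
    batch = seed_window.reshape(1, look_back, n_features)
    preds = []
    for _ in range(steps):
        step = model.predict(batch, verbose=0)[0]    # one (n_features,) step
        preds.append(step)
        # Slide the window: drop the oldest timestep, append the prediction.
        batch = np.append(batch[:, 1:, :], [[step]], axis=1)
    return np.array(preds)

# e.g. preds_scaled = recursive_forecast(model, test_scaled[-look_back:], len(test_scaled))
#      preds = scaler.inverse_transform(preds_scaled)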
tsDLMultiVrtInUniVrtOut.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import sys import pandas as pd import numpy as np import numpy import pandas import math from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Input, Dense, TimeDistributed, LSTM, Dropout, RepeatVector from sklearn.preprocessing import MinMaxScaler import logging import tensorflow as tf import keras_tuner #from keras_tuner.engine.hyperparameters import HyperParameters from keras_tuner.tuners import RandomSearch,BayesianOptimization ,Hyperband from sklearn.model_selection import train_test_split from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import warnings warnings.simplefilter("ignore", UserWarning) from sklearn.metrics import mean_absolute_percentage_error class tsDLMultiVrtInUniVrtOut: def __init__(self,configfile,testpercentage,targetFeature,dateTimeFeature): self.look_back=None self.look_forward=None # self.df=df self.epochs=None self.batch_size=None self.hidden_layers=None self.optimizer=None self.activation_fn="relu" self.loss_fn=None self.first_layer=None self.dropout=None self.model_name=None self.dl_params = configfile # self.data=data self.targetFeature=targetFeature self.dateTimeFeature=dateTimeFeature self.testpercentage = float(testpercentage) self.log = logging.getLogger('eion') ##Added for ts hpt (TFSTask:7033) self.tuner_algorithm="" self.num_features=0 ##Get deep learning model hyperparameter from advanced config def getdlparams(self): val=self.dl_params self.log.info('-------> The given mlp/lstm timeseries algorithm parameters:>>') self.log.info(" "+str(val)) for k,v in val.items(): try: if (k == "tuner_algorithm"): self.tuner_algorithm=str(v) elif (k == "activation"): if not isinstance(k,list): self.activation_fn=str(v).split(',') else: self.activation_fn=k elif (k == "optimizer"): self.optimizer=str(v) elif (k == "loss"): self.loss_fn=str(v) elif (k == "first_layer"): if not isinstance(k,list): self.first_layer=str(v).split(',') else: self.first_layer=k elif (k == "lag_order"): if isinstance(k,list): k = ''.join(v) k=int(float(str(v))) else: self.look_back=int(float(str(v))) elif (k == "forward_order"): if isinstance(k,list): k = ''.join(v) k=int(float(str(v))) else: self.look_forward=int(float(str(v))) elif (k == "hidden_layers"): self.hidden_layers=int(v) elif (k == "dropout"): if not isinstance(k,list): self.dropout=str(v).split(',') else: self.dropout=k elif (k == "batch_size"): self.batch_size=int(v) elif (k == "epochs"): self.epochs=int(v) elif (k == "model_name"): self.model_name=str(v) except Exception as e: self.log.info('Exception occured in deeep learn param reading, setting up default params.') self.activation_fn="relu" self.optimizer="adam" self.loss_fn="mean_squared_error" self.first_layer=[8,512] self.hidden_layers=1 self.look_back=int(2) self.dropout=[0.0,0.1,0.01] self.batch_size=2 self.epochs=50 self.model_name="lstmmodel.h5" continue # Reshape the data to the required input shape of the LSTM model def create_dataset(self,series, n_past, n_future, targetcolindx): 
X, y = list(), list() for window_start in range(len(series)): past_end = window_start + n_past future_end = past_end + n_future if future_end > len(series): break # slicing the past and future parts of the window past, future = series[window_start:past_end, :], series[past_end:future_end, targetcolindx] X.append(past) y.append(future) return np.array(X), np.array(y) #return X, y ## Added function for hyperparam tuning (TFSTask:7033) def build_model(self,hp): n_features = self.num_features try: loss=self.loss_fn optimizer=self.optimizer # self.getdlparams() try: if optimizer.lower() == "adam": optimizer=tensorflow.keras.optimizers.Adam elif(optimizer.lower() == "adadelta"): optimizer=tensorflow.keras.optimizers.experimental.Adadelta elif(optimizer.lower() == "nadam"): optimizer=tensorflow.keras.optimizers.experimental.Nadam elif(optimizer.lower() == "adagrad"): optimizer=tensorflow.keras.optimizers.experimental.Adagrad elif(optimizer.lower() == "adamax"): optimizer=tensorflow.keras.optimizers.experimental.Adamax elif(optimizer.lower() == "rmsprop"): optimizer=tensorflow.keras.optimizers.experimental.RMSprop elif(optimizer.lower() == "sgd"): optimizer=tensorflow.keras.optimizers.experimental.SGD else: optimizer=tensorflow.keras.optimizers.Adam except: optimizer=tf.keras.optimizers.Adam pass # look_back_min=int(self.look_back[0]) # look_back_max=int(self.look_back[1]) first_layer_min=round(int(self.first_layer[0])) first_layer_max=round(int(self.first_layer[1])) dropout_min=float(self.dropout[0]) dropout_max=float(self.dropout[1]) dropout_step=float(self.dropout[2]) #import pdb; pdb.set_trace() n_past= self.look_back n_future = self.look_back encoder_l = {} encoder_outputs = {} encoder_states = {} decoder_l = {} decoder_outputs = {} encoder_inputs = Input(shape=(n_past, n_features)) try: if(self.hidden_layers > 0): encoder_l[0] = LSTM(units=hp.Int('enc_input_unit',min_value=first_layer_min,max_value=first_layer_max,step=32), activation = hp.Choice(f'enc_input_activation', values = self.activation_fn), return_sequences = True, return_state=True) else: encoder_l[0] = LSTM(units=hp.Int('enc_input_unit',min_value=first_layer_min,max_value=first_layer_max,step=32), activation = hp.Choice(f'enc_input_activation', values = self.activation_fn), return_state=True) except Exception as e: import traceback self.log.info("lstm build traceback: \n"+str(traceback.print_exc())) model=tf.keras.Sequential() return model encoder_outputs[0] = encoder_l[0](encoder_inputs) encoder_states[0] = encoder_outputs[0][1:] if(self.hidden_layers > 0): for indx in range(self.hidden_layers): lindx = indx + 1 if lindx == self.hidden_layers: encoder_l[lindx] = LSTM(units=hp.Int(f'enc_lstm_units_{lindx}',min_value=first_layer_min,max_value=first_layer_max,step=32), dropout=hp.Float(f'enc_lstm_dropout_{lindx}',min_value=dropout_min,max_value=dropout_max,step=dropout_step), activation = hp.Choice(f'enc_lstm_activation_{lindx}', values = self.activation_fn), return_state=True) else: encoder_l[lindx] = LSTM(units=hp.Int(f'enc_lstm_units_{lindx}',min_value=first_layer_min,max_value=first_layer_max,step=32), dropout=hp.Float(f'enc_lstm_dropout_{lindx}',min_value=dropout_min,max_value=dropout_max,step=dropout_step), activation = hp.Choice(f'enc_lstm_activation_{lindx}', values = self.activation_fn), return_sequences = True, return_state=True) encoder_outputs[lindx] = encoder_l[lindx](encoder_outputs[indx][0]) encoder_states[lindx] = encoder_outputs[lindx][1:] decoder_inputs = 
RepeatVector(n_future)(encoder_outputs[self.hidden_layers][0]) else: decoder_inputs = RepeatVector(n_future)(encoder_outputs[0][0]) # if(self.hidden_layers > 0): decoder_l[0] = LSTM(encoder_states[0][0].get_shape()[1], activation = hp.Choice(f'dec_input_activation', values = self.activation_fn), return_sequences=True)(decoder_inputs,initial_state = encoder_states[0]) else: decoder_l[0] = LSTM(encoder_states[0][0].get_shape()[1], activation = hp.Choice(f'dec_input_activation', values = self.activation_fn), return_sequences=True)(decoder_inputs,initial_state = encoder_states[0]) if(self.hidden_layers > 0): for indx in range(self.hidden_layers): lindx = indx + 1 decoder_l[lindx] = LSTM(encoder_states[lindx][0].get_shape()[1], activation = hp.Choice(f'dec_lstm_activation_{lindx}', values = self.activation_fn), return_sequences=True)(decoder_l[indx],initial_state = encoder_states[lindx]) decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[self.hidden_layers][0].get_shape()[1], activation = hp.Choice(f'dec_output_activation_1', values = self.activation_fn)))(decoder_l[self.hidden_layers]) decoder_outputs[1] = TimeDistributed(tf.keras.layers.Dense(1))(decoder_outputs[0]) else: # decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[0][0].get_shape()[1]))(decoder_l[0]) # decoder_outputs[1] = LSTM(200, return_sequences=True)(decoder_outputs[0]) # decoder_outputs[2] = tf.keras.layers.Flatten()(decoder_outputs[1]) # decoder_outputs[3] = tf.keras.layers.Dense(1)(decoder_outputs[2]) decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[0][0].get_shape()[1], activation = hp.Choice(f'dec_output_activation_1', values = self.activation_fn)))(decoder_l[0]) decoder_outputs[1] = TimeDistributed(tf.keras.layers.Dense(1))(decoder_outputs[0]) # model = tf.keras.models.Model(encoder_inputs,decoder_outputs[1]) self.log.info(model.summary()) model.compile(optimizer=optimizer(hp.Choice('learning_rate',values=[1e-1,1e-2,1e-3,1e-4])),loss=loss,metrics=[self.loss_fn]) except Exception as e: import traceback self.log.info(",Hyperparam tuning build_model err msg: \n"+ str(e)) self.log.info("Hyperparam tuning build_model err traceback: \n"+str(traceback.print_exc())) return model ##LSTM ecncoder decoder with multivariate input and univarite output prediction function (lstm model, train, prediction, metrics) def lstm_encdec_mvin_uvout(self,df): try: loss=self.loss_fn self.getdlparams() n_features = len(df.columns) self.num_features=n_features n_past= self.look_back n_future = self.look_back try: if (type(self.targetFeature) is list): pass else: self.targetFeature = list(self.targetFeature.split(",")) except: pass targetColIndx = [] for target in self.targetFeature: targetColIndx.append(df.columns.get_loc(target)) #if user doesnt applies any transformation, this will get applied scaler=MinMaxScaler() df_trnsf=scaler.fit_transform(df) train_data, test_data = train_test_split(df_trnsf, test_size=0.2, shuffle=False) tuner_alg=self.tuner_algorithm #The below create_dataset only for getting best model and best hyperparameters X_train, y_train = self.create_dataset(train_data, n_past, n_future, targetColIndx) X_test, y_test = self.create_dataset(test_data, n_past, n_future, targetColIndx) # X_train = X_train.reshape((X_train.shape[0], X_train.shape[1],n_features)) # y_train = y_train.reshape((y_train.shape[0], y_train.shape[1], 1)) self.log.info("Hyperparameter tuning algorithm is given by user (AION->Advanced configuration -> timeSeriesForecasting->LSTM): \n"+str(tuner_alg)) try: ## 
Remove untitled_project dir in AION root folder created by previous tuner search run import shutil shutil.rmtree(r".\untitled_project") except: pass try: if (tuner_alg.lower()=="randomsearch"): tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=1,executions_per_trial=3) elif (tuner_alg.lower()=="bayesianoptimization"): tuner=BayesianOptimization(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3) elif (tuner_alg.lower()=="hyperband"): tuner=Hyperband(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_epochs=50,factor=3) else: self.log.info("The given alg is not implemented. Using default hyperparam tuning algorithm: RandomSearch.\n") tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3) from keras.callbacks import EarlyStopping stop_early = EarlyStopping(monitor='val_loss', patience=5) except Exception as e: import traceback self.log.info("The given alg have some issue, Using default hyperparam tuning algorithm: RandomSearch.\n"+str(e)) tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=1,executions_per_trial=3) self.log.info("Started Exception default Random Search") #hpt search for best params try: self.log.info("First try: Tuner search started") tuner.search(X_train, y_train,validation_data=(X_test, y_test), callbacks=[stop_early]) self.log.info("First try: Tuner search ends") except Exception as e: self.log.info("Second try: Tuner search starts.\n"+str(e)) tuner.search(x=X_train,y=y_train,validation_split=0.2, callbacks=[stop_early]) self.log.info("Second try: Tuner search ends") # best_model = tuner.get_best_models(num_models=1)[0] #self.log.info("best_model.summary(): \n"+str(best_model.summary())) best_hps=tuner.get_best_hyperparameters(num_trials=1)[0] self.log.info("TS Multivariate LSTM best hyperparameter values:\n"+str(best_hps.values)) self.log.info("Activation fn:\n"+str(self.activation_fn)) n_input=self.look_back best_hmodel=tuner.hypermodel.build(best_hps) optimizer=self.optimizer learning_rate=float(best_hps.get('learning_rate')) try: ##TFSTask:7033, Added below try block for time series hyperparam tuning, here, for any optimizer, best learning_rate is provided from best_hps. try: if optimizer.lower() == "adam": optimizer=tensorflow.keras.optimizers.Adam(learning_rate=learning_rate) elif(optimizer.lower() == "adadelta"): optimizer=tensorflow.keras.optimizers.experimental.Adadelta(learning_rate=learning_rate) elif(optimizer.lower() == "nadam"): optimizer=tensorflow.keras.optimizers.experimental.Nadam(learning_rate=learning_rate) elif(optimizer.lower() == "adagrad"): optimizer=tensorflow.keras.optimizers.experimental.Adagrad(learning_rate=learning_rate) elif(optimizer.lower() == "adamax"): optimizer=tensorflow.keras.optimizers.experimental.Adamax(learning_rate=learning_rate) elif(optimizer.lower() == "rmsprop"): optimizer=tensorflow.keras.optimizers.experimental.RMSprop(learning_rate=learning_rate) elif(optimizer.lower() == "sgd"): optimizer=tensorflow.keras.optimizers.experimental.SGD(learning_rate=learning_rate) else: optimizer=tensorflow.keras.optimizers.Adam(learning_rate=learning_rate) except: optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate) pass ##From best hyperparameter values, now creating multivariate time series model using time generator. 
generatorTrain=TimeseriesGenerator(X_train, y_train, length=n_past, batch_size=self.batch_size) # generatorTest=TimeseriesGenerator(test,test,length=n_input,batch_size=self.batch_size) batch_0=generatorTrain[0] x,y=batch_0 epochs=int(self.epochs) ##Multivariate LSTM model try: encoder_l = {} encoder_outputs = {} encoder_states = {} decoder_l = {} decoder_outputs = {} enc_lstm_dropout = {} enc_input_unit = best_hps.get('enc_input_unit') enc_input_activation = best_hps.get('enc_input_activation') dec_input_activation = best_hps.get('dec_input_activation') dec_output_activation_1 = best_hps.get('dec_output_activation_1') enc_lstm_units = {} enc_lstm_activation = {} dec_lstm_activation = {} for indx in range(self.hidden_layers): lindx = indx + 1 enc_lstm_units[lindx] = best_hps.get('enc_lstm_units_'+str(lindx)) enc_lstm_activation[lindx] = best_hps.get('enc_lstm_activation_'+str(lindx)) dec_lstm_activation[lindx] = best_hps.get('dec_lstm_activation_'+str(lindx)) enc_lstm_dropout[lindx] = best_hps.get('enc_lstm_dropout_'+str(lindx)) encoder_inputs = Input(shape=(n_past, n_features)) if(self.hidden_layers > 0): encoder_l[0] = LSTM(enc_input_unit, activation = enc_input_activation, return_sequences = True, return_state=True) else: encoder_l[0] = LSTM(enc_input_unit, activation = enc_input_activation, return_state=True) encoder_outputs[0] = encoder_l[0](encoder_inputs) encoder_states[0] = encoder_outputs[0][1:] if(self.hidden_layers > 0): for indx in range(self.hidden_layers): lindx = indx + 1 if lindx == self.hidden_layers: encoder_l[lindx] = LSTM(enc_lstm_units[lindx], dropout = enc_lstm_dropout[lindx], activation = enc_lstm_activation[lindx], return_state=True) else: encoder_l[lindx] = LSTM(enc_lstm_units[lindx], dropout = enc_lstm_dropout[lindx], activation = enc_lstm_activation[lindx], return_sequences = True, return_state=True) encoder_outputs[lindx] = encoder_l[lindx](encoder_outputs[indx][0]) encoder_states[lindx] = encoder_outputs[lindx][1:] decoder_inputs = RepeatVector(n_future)(encoder_outputs[self.hidden_layers][0]) else: decoder_inputs = RepeatVector(n_future)(encoder_outputs[0][0]) # if(self.hidden_layers > 0): decoder_l[0] = LSTM(encoder_states[0][0].get_shape()[1], activation = dec_input_activation, return_sequences=True)(decoder_inputs,initial_state = encoder_states[0]) else: decoder_l[0] = LSTM(encoder_states[0][0].get_shape()[1], activation = dec_input_activation, return_sequences=True)(decoder_inputs,initial_state = encoder_states[0]) if(self.hidden_layers > 0): for indx in range(self.hidden_layers): lindx = indx + 1 decoder_l[lindx] = LSTM(encoder_states[lindx][0].get_shape()[1], activation = dec_lstm_activation[lindx], return_sequences=True)(decoder_l[indx],initial_state = encoder_states[lindx]) decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[self.hidden_layers][0].get_shape()[1], activation = dec_output_activation_1))(decoder_l[self.hidden_layers]) decoder_outputs[1] = TimeDistributed(tf.keras.layers.Dense(1))(decoder_outputs[0]) else: decoder_outputs[0] = TimeDistributed(tf.keras.layers.Dense(decoder_l[0][0].get_shape()[1], activation = dec_output_activation_1))(decoder_l[0]) decoder_outputs[1] = TimeDistributed(tf.keras.layers.Dense(1))(decoder_outputs[0]) # model = tf.keras.models.Model(encoder_inputs,decoder_outputs[1]) self.log.info(model.summary()) self.log.info("loss="+self.loss_fn) model.compile(optimizer=optimizer,loss=self.loss_fn,metrics=[self.loss_fn]) #model.fit_generator(generatorTrain, epochs=epochs,shuffle=False, verbose=0) 
model.fit(X_train, y_train, batch_size=self.batch_size, epochs=epochs,shuffle=False, verbose=2) except Exception as e: import traceback self.log.info("multivariate model build error: error msg:: \n"+str(e)) return 'Error',0,0,0,0,None,pd.DataFrame(),0,None #predictions = model.predict_generator(generatorTest) except Exception as e: import traceback self.log.info("optimizer and timesereis generator build error: error msg:: \n"+str(e)) self.log.info("optimizer and timesereis generator build error traceback: \n"+str(traceback.print_exc())) return 'Error',0,0,0,0,None,pd.DataFrame(),0,None try: predictions=[] X_test, y_test = self.create_dataset(test_data, n_past, n_future, targetColIndx) predictions = model.predict(X_test) self.log.info(predictions) #convert the x test(includes target) to 2d as inverse transform accepts only 2d values xtestlen = len(X_test) xtest_2d = X_test.ravel().reshape(xtestlen * n_past, n_features) #inverse tranform of actual value xtest_2d = scaler.inverse_transform(xtest_2d) actual = xtest_2d[:, targetColIndx] #inverse tranform of predicted value prediction_1d = predictions.ravel() prediction_1d = prediction_1d.reshape(len(prediction_1d),1) self.log.info(prediction_1d) xtest_2d[:, targetColIndx] = prediction_1d xtest_2d = scaler.inverse_transform(xtest_2d) predictions = xtest_2d[:, targetColIndx] mse=None rmse=None ## Creating dataframe for actual,predictions try: pred_cols=list() actual_cols=list() for i in range(len(self.targetFeature)): pred_cols.append(self.targetFeature[i]+'_pred') actual_cols.append(self.targetFeature[i]+'_actual') predictions = pd.DataFrame(predictions.ravel(), columns=pred_cols) actual = pd.DataFrame(actual.ravel(), columns=actual_cols) df_predicted=pd.concat([actual,predictions],axis=1) self.log.info("LSTM Multivariate prediction dataframe: \n"+str(df_predicted)) from math import sqrt from sklearn.metrics import mean_squared_error from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error target=self.targetFeature mse_dict={} rmse_dict={} mae_dict={} mape_dict={} r2_dict={} lstm_var = 0 self.log.info(actual.shape) self.log.info(actual) self.log.info(predictions.shape) self.log.info(predictions) mse = mean_squared_error(actual,predictions) mse_dict[self.targetFeature[0]]=mse rmse=sqrt(mse) rmse_dict[self.targetFeature[0]]=rmse lstm_var = lstm_var+rmse self.log.info("Name of the target feature: "+str(self.targetFeature)) self.log.info("RMSE of the target feature: "+str(rmse)) r2 = r2_score(actual,predictions) r2_dict[self.targetFeature[0]]=r2 mae = mean_absolute_error(actual,predictions) mae_dict[self.targetFeature[0]]=mae mape = mean_absolute_percentage_error(actual,predictions) mape_dict[self.targetFeature[0]]=mape ## For VAR comparison, send last target mse and rmse from above dict lstm_var = lstm_var/len(target) select_msekey=list(mse_dict.keys())[-1] l_mse=list(mse_dict.values())[-1] select_rmsekey=list(rmse_dict.keys())[-1] l_rmse=list(rmse_dict.values())[-1] select_r2key=list(r2_dict.keys())[-1] l_r2=list(r2_dict.values())[-1] select_maekey=list(mae_dict.keys())[-1] l_mae=list(mae_dict.values())[-1] l_mape=list(mape_dict.values())[-1] self.log.info("Selected target feature of LSTM for best model selection: "+str(select_rmsekey)) self.log.info("lstm rmse: "+str(l_rmse)) self.log.info("lstm mse: "+str(l_mse)) self.log.info("lstm r2: "+str(l_r2)) self.log.info("lstm mae: "+str(l_mae)) self.log.info("lstm mape: "+str(l_mape)) except Exception as e: import traceback self.log.info("prediction error traceback: 
\n"+str(traceback.print_exc())) except Exception as e: import traceback self.log.info("dataframe creation error. err.msg: "+str(e)) self.log.info("dataframe creation error traceback: \n"+str(traceback.print_exc())) return 'Error',0,0,0,0,None,pd.DataFrame(),0,None return 'Success',round(l_mse,2),round(l_rmse,2),round(l_r2,2),round(l_mae,2),model,df_predicted,n_input,scaler # import os #predicted_file_name='lstm_prediction_df.csv' #predicted_file_path=os.path.join(self.dataFolderLocation,predicted_file_name) #df_predicted.to_csv(predicted_file_path) ##save model #model_path = os.path.join(self.dataFolderLocation,self.model_name) #self.log.info("mlp model saved at: "+str(model_path)) #model.save(model_path) except Exception as e: import traceback ## Just use below traceback print to get detailed error information. # import traceback # print(" traceback error 7:\n",traceback.print_exc()) ## Enable traceback for debugging self.log.info("dataframe creation error. err.msg: "+str(e)) self.log.info("Final exception traceback: \n"+str(traceback.print_exc())) return 'Error',0,0,0,0,None,pd.DataFrame(),0,None
ts_modelvalidation.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import json #Python sklearn & std libraries import numpy as np import pandas as pd from time_series.ts_arima_eion import eion_arima from statsmodels.tsa.vector_ar.vecm import coint_johansen from statsmodels.tsa.vector_ar.var_model import VAR from math import * from sklearn.metrics import mean_squared_error from sklearn.metrics import mean_absolute_error from math import sqrt import logging import os import sys import time from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error from pandas import read_csv from statsmodels.tsa.stattools import adfuller import pmdarima as pm from statsmodels.tsa.stattools import grangercausalitytests from statsmodels.stats.stattools import durbin_watson from sklearn.utils import check_array class timeseriesModelTests(): def __init__(self,data,targetFeature,datetimeFeature,count): #self.tsConfig = tsConfig #self.modelconfig = modelconfig #self.modelList = modelList self.data = data self.targetFeature = targetFeature self.dateTimeFeature = datetimeFeature self.count=count self.log = logging.getLogger('eion') def StatinaryChecks(self,dictDiffCount): self.log.info("\n---------------Start Stationary Checks-----------") tFeature = self.targetFeature.split(',') tFeature.append(self.dateTimeFeature) self.data=self.data[tFeature] tFeature.remove(self.dateTimeFeature) lengthtFeature=len(tFeature) diffCount=0 try : for features in (tFeature): XSt = self.data[features] XSt=XSt.values resultSt = adfuller(XSt,autolag='AIC') stationaryFlag = False #print(resultSt) self.log.info('-------> Features: '+str(features)) self.log.info('----------> ADF Statistic: '+str(resultSt[0])) self.log.info('----------> p-value: %f' % resultSt[1]) if resultSt[1]<= 0.05: self.log.info("-------------> Converted As Stationary Data") stationaryFlag = True else: self.log.info("-------------> Stationary Conversion Required") stationaryFlag = False self.log.info('----------> Critical Values') for key, value in resultSt[4].items(): self.log.info('----------> '+str(key)+': '+str(value)) if stationaryFlag == False: self.data[features]=self.data[features].diff() self.data=self.data.dropna() dictDiffCount[features]=1 XStt = self.data[features] XStt=XStt.values resultStt = adfuller(XStt) if resultStt[1] > 0.05: self.data[features]=self.data[features].diff() self.data=self.data.dropna() dictDiffCount[features]=2 XSttt = self.data[features] XSttt=XSttt.values resultSttt = adfuller(XSttt) if resultSttt[1]<= 0.05: stationaryFlag = True else: stationaryFlag = True self.log.info("------------->"+str(dictDiffCount)) if stationaryFlag == True: self.log.info("----------> Equals to Stationary Data") else: self.log.info("----------> Not Equal To Stationary Data") self.log.info("-------> Stationary data diff()") self.log.info(dictDiffCount) self.log.info("---------------Start Stationary Checks Ends-----------\n") return self.data,dictDiffCount except Exception as inst: self.log.info('<!------------- Time Series Stationary Error ---------------> ') 
exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def varTimeseriesModelTests(self,data): try : tFeature = self.targetFeature.split(',') self.log.info("\n--------- Start Granger Causality Test Results ------------") gtest=grangercausalitytests(data[tFeature], maxlag=15, addconst=True, verbose=True) self.log.info("-------> GrangerCausalitytest Results "+str(gtest.values())) self.log.info("--------- End Granger Causality Test Results ------------\n") return gtest except Exception as inst: self.log.info('<!------------- Time Series Granger Causality testTest Error ---------------> ') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def grangersCausationMatrix(self,data, variables, test='ssr_chi2test', verbose=False): try : countVariables=0 self.log.info(len(variables)) self.log.info("\n--------------Start GrangersCausationMatrix---------------") df = pd.DataFrame(np.zeros((len(variables), len(variables))), columns=variables, index=variables) for c in df.columns: for r in df.index: test_result = grangercausalitytests(data[[r, c]], maxlag=12, verbose=False) p_values = [round(test_result[i+1][0][test][1],4) for i in range(12)] if verbose: print(f'Y = {r}, X = {c}, P Values = {p_values}') min_p_value = np.min(p_values) df.loc[r, c] = min_p_value df.columns = [var + '_x' for var in variables] df.index = [var + '_y' for var in variables] self.log.info(df) for i in range(len(variables)): for j in range(len(variables)): if i!=j and df.iloc[i][j]<0.05 and df.iloc[i][j]<0.05: countVariables=countVariables+1 self.log.info("--------------End GrangersCausationMatrix---------------\n") return df,countVariables except Exception as inst: self.log.info('<!------------- Time Series grangersCausationMatrix Test Error ---------------> ') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) return df,countVariables def coIntegrationTest(self,data): try : tdata = data.drop([self.dateTimeFeature], axis=1) tdata.index = data[self.dateTimeFeature] cols = tdata.columns self.log.info("\n-------------- Start of the Co Integration test ---------------") lenTargetFeature=len(self.targetFeature) countIntegrationFeature=0 N, l = tdata.shape jres = coint_johansen(tdata, 0, 1) trstat = jres.lr1 tsignf = jres.cvt for i in range(l): if trstat[i] > tsignf[i, 1]: r = i + 1 jres.r = r jres.evecr = jres.evec[:, :r] jres.r = r countIntegrationFeature=jres.r jres.evecr = jres.evec[:, :r] self.log.info('------->coint_johansen trace statistics: '+str(trstat)) self.log.info('------->coint_johansen critical values:') self.log.info(tsignf) self.log.info("------->There are "+str(countIntegrationFeature)+" Co-Integration vectors") self.log.info("-------------- End of the Co Integration test ---------------\n") return countIntegrationFeature except Exception as inst: self.log.info('<!------------- Time Series Co-Integration Test Error ---------------> ') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
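## Standalone sketch (illustrative only) of the two multivariate tests used above, run on
## synthetic data with an assumed shared trend; the column names 'a'/'b' are made up here.
if __name__ == '__main__':
    import numpy as np
    import pandas as pd
    from statsmodels.tsa.vector_ar.vecm import coint_johansen
    from statsmodels.tsa.stattools import grangercausalitytests

    rng = np.random.default_rng(0)
    common = rng.standard_normal(200).cumsum()  # shared stochastic trend -> cointegration
    demo = pd.DataFrame({'a': common + rng.standard_normal(200),
                         'b': common + rng.standard_normal(200)})

    jres = coint_johansen(demo, 0, 1)
    # Count cointegrating relations: trace statistic vs the 5% critical value (column 1).
    r = int(np.sum(jres.lr1 > jres.cvt[:, 1]))
    print('co-integration vectors:', r)

    # Does the history of 'b' help predict 'a'? (small maxlag keeps the demo fast)
    grangercausalitytests(demo[['a', 'b']], maxlag=4, verbose=False)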
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
tsStationarySeasonalityTest.py
import pandas as pd import numpy as np from statsmodels.tsa.stattools import adfuller from statsmodels.tsa.stattools import kpss from statsmodels.tsa.seasonal import seasonal_decompose import logging import os import warnings warnings.filterwarnings('ignore') ## Main class to find out seassonality and stationary in timeseries data. class tsStationarySeasonalityTest: def __init__(self,df,deployLocation): self.df=df self.deployLocation=deployLocation self.log = logging.getLogger('eion') ## to get the timeseries data stationary information def stationary_model(self,df,target_features,stationary_check_method): self.log.info("<------ Time Series stationary test started.....------------->\n") self.log.info("<------ Feature used:------------->\t"+str(target_features)) stationary_status=None if (stationary_check_method.lower()=='adfuller'): stats_model=adfuller(df[target_features]) # p_val=adf_result[1] statistic, p_value, n_lags, num_bservations,critical_values,info_criterion_best=stats_model[0],stats_model[1],stats_model[2],stats_model[3],stats_model[4],stats_model[5] ##Uncomment below logs when required. self.log.info("Adfuller test (time series stationary test) p_value: \t"+str(p_value)) # self.log.info("Adfuller test (time series stationary test) statistics: \t"+str(statistic)) # self.log.info("Adfuller test (time series stationary test) number of lags (time steps): \t"+str(n_lags)) # self.log.info("Adfuller test (time series stationary test) Critical values: \n") ##To display critical values # for key, value in stats_model[4].items(): # self.log.info(" \t"+str(key)+"\t"+str(value)) if (p_value>0.05): stationary_status="feature is non-stationary" self.log.info('Status:- |... '+str(target_features)+' is non stationary') elif(p_value<0.05): stationary_status="feature is stationary" self.log.info('Status:- |... '+str(target_features)+' is stationary') ##kpss is opposite to ADF in considering null hypothesis. In KPSS, if null hypothesis,then it is stationary as oppose to ADF. elif (stationary_check_method.lower()=='kpss'): from statsmodels.tsa.stattools import kpss stats_model = kpss(df[target_features]) statistic, p_value, n_lags, critical_values=stats_model[0],stats_model[1],stats_model[2],stats_model[3] self.log.info("kpss test (time series stationary test) p_value: \t"+str(p_value)) self.log.info("kpss test (time series stationary test) statistics: \t"+str(statistic)) self.log.info("kpss test (time series stationary test) number of lags (time steps): \t"+str(n_lags)) self.log.info("kpss test (time series stationary test) Critical values: \n") for key, value in stats_model[3].items(): self.log.info(" \t"+str(key)+"\t"+str(value)) ##In kpss, the stationary condition is opposite to Adafuller. if (p_value>0.05): self.log.info('Status:- |... '+str(target_features)+' is stationary') else: self.log.info('Status:- |... 
'+str(target_features)+' is non stationary') return stats_model,n_lags,p_value,stationary_status ## Get stationary details def stationary_check(self,target_features,time_col,method): df=self.df try: df[time_col]=pd.to_datetime(df[time_col]) except Exception as e: self.log.info("Issue in datetime conversion...\n"+str(e)) df=df.set_index(time_col) try: stationary_check_method=method except: stationary_check_method='adfuller' if (len(target_features) == 1): try: if isinstance(target_features,list): target_features=''.join(target_features) elif isinstance(target_features,int): target_features=str(target_features) elif isinstance(target_features,str): pass except Exception as e: self.log.info("stationary check target feature error: \t"+str(e)) stationary_result={} stats_model,n_lags,p_value,stationary_status=self.stationary_model(df,target_features,stationary_check_method) stationary_result[target_features]=stationary_status elif(len(target_features) > 1): stationary_result={} for col in df.columns: # self.log.info("Multivariate feature for Stationary check:\t"+str(col)) stats_model,n_lags,p_value,stationary_status=self.stationary_model(df,col,stationary_check_method) stationary_result[col]=stationary_status else: self.log.info("TS Stationarity Test: Error in target feature, please check.\n") # self.log.info("Feature based stationarity_result:\n"+str(stationary_result)) # ## Stationary component for whole dataset stationary_combined_res=dict() # stats_model,n_lags,p_value,stationary_status=self.stationary_all_features(time_col,'adfuller') c_dict=[k for k,v in stationary_result.items() if 'non-stationary' in v] if (len(c_dict)>=1): stationary_combined_res['dataframe_stationarity']='Non-Stationary' self.log.info('Status:- |... Data is non-stationary') else: stationary_combined_res['dataframe_stationarity']='Stationary' # self.log.info("Stationarity information for whole dataset:\n"+str(stationary_combined_res)) self.log.info("Time series Stationarity test completed.\n") return stats_model,n_lags,p_value,stationary_result,stationary_combined_res #Get seasonality by using seasonal_decompose lib.
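    ## Illustrative note: seasonal_decompose() splits the series into trend + seasonal + residual
    ## components (or trend * seasonal * residual for model='multiplicative'); `period` is the
    ## assumed cycle length in observations. The method below reuses the KPSS lag count as that
    ## period, which is a heuristic choice rather than a statsmodels requirement.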
def seasonality_model(self,target_features,df): self.log.info("<------ Time Series Seasonality test started.....------------->\n") self.log.info("<------ Feature used:------------->\n"+str(target_features)) seasonality_status=None try: try: stats_model = kpss(df[target_features]) statistic, p_value, n_lags, critical_values=stats_model[0],stats_model[1],stats_model[2],stats_model[3] except: n_lags=1 pass try: df_target=self.df[target_features] decompose_result_mult = seasonal_decompose(df_target,model='additive', extrapolate_trend='freq', period=n_lags) except Exception as e: self.log.info("Logging seasonality_model decompose_result_mult: \t"+str(e)) ##If additive model (type of seasonal component) failed, use multiplicative decompose_result_mult = seasonal_decompose(df_target,model='multiplicative', extrapolate_trend='freq', period=1) trend = decompose_result_mult.trend observed=decompose_result_mult.observed seasonal = decompose_result_mult.seasonal residual = decompose_result_mult.resid try: if isinstance(df_target, pd.Series): auto_correlation = df_target.autocorr(lag=n_lags) # self.log.info("seasonality test: auto_correlation value:\n"+str(auto_correlation)) elif isinstance(df_target, pd.DataFrame): df_target = df_target.squeeze() auto_correlation = df_target.autocorr(lag=n_lags) # self.log.info("seasonality test: auto_correlation value:\n"+str(auto_correlation)) except: pass self.log.info("<------------------ Time series Seasonality test result:------------------>") if (seasonal.sum()==0): seasonality_status="feature don't have seasonality (non seasonality)." self.log.info('Status:- |... '+str(target_features)+' does not have seasonality') self.log.info("<----- The model feature: "+str(target_features)+" does not have significant seasonality.----->\n") else: seasonality_status="feature has seasonality." self.log.info('Status:- |... '+str(target_features)+' have seasonality') ##Please use the below plot for GUI show (seasonality components) # decompose_result_mult.plot() df['observed'] = decompose_result_mult.observed df['residual'] = decompose_result_mult.resid df['seasonal'] = decompose_result_mult.seasonal df['trend'] = decompose_result_mult.trend df_name='timeseries_seasonality_check_'+f"{target_features}"+'.csv' dir_n = os.path.join(self.deployLocation,'data','seasonality') if not os.path.exists(dir_n): os.makedirs(dir_n) model_path=os.path.join(dir_n,df_name) self.log.info("Seasonality information saved as dataframe at:\t "+str(model_path)) ## Seasonal component for whole dataset df.to_csv(model_path) except Exception as e: self.log.info("Seasonality function exception: \t"+str(e)) return df,decompose_result_mult,seasonality_status ##Main function to check seasonlity in data def seasonal_check(self,target_features,time_col,seasonal_model): df=self.df # self.log.info("seasonal check started... \n") try: df[time_col]=pd.to_datetime(df[time_col]) except Exception as e: self.log.info("Issue in datetime conversion...\n"+str(e)) df=df.set_index(time_col) if (len(target_features)==1): try: if isinstance(target_features,list): target_features=''.join(target_features) elif isinstance(target_features,int): target_features=str(target_features) elif isinstance(target_features,str): pass except Exception as e: self.log.info("stationary check target feature error: \t"+str(e)) ## Seasonal component for individual feature based. 
seasonality_result=dict() df,decompose_result_mult,seasonality_status = self.seasonality_model(target_features,df) seasonality_result[target_features]=seasonality_status elif(len(target_features) > 1): seasonality_result=dict() self.log.info("TS seasonality Test: The problem type is time series Multivariate.") for col in df.columns: df,decompose_result_mult,seasonality_status = self.seasonality_model(col,df) seasonality_result[col]=seasonality_status else: self.log.info("TS seasonality Test: Error in target feature, please check.\n") # self.log.info("Feature based seasonality_result:\n"+str(seasonality_result)) # ## Seasonal component for whole dataset seasonality_combined_res=dict() c_dict=[k for k,v in seasonality_result.items() if 'non seasonality' in v] if (len(c_dict)>=1): seasonality_combined_res['dataframe_seasonality']='No Seasonal elements' else: seasonality_combined_res['dataframe_seasonality']='contains seasonal elements.' # self.log.info("Seasonality information for whole dataset:\n"+str(seasonality_combined_res)) self.log.info("Time series Seasonality test completed.\n") return df,decompose_result_mult,seasonality_result,seasonality_combined_res #Main fn for standalone test purpose if __name__=='__main__': print("Inside seasonality-stationary test main function...") print("Below code used for standalone test purpose.") # df=pd.read_csv(r"C:\AION_Works\Data\order_forecast_ts.csv") # print("df info: \n",df.info()) # df=df.drop('index',axis=1) # time_col="DateTime" # target='order1' # stationary_method='adfuller' # seasonal_model="additive" ## two models are available: 1.multiplicative, 2.additive # if (isinstance(target,list)): # pass # elif (isinstance(target,str)): # target=list(target.split(',')) # cls_ins=tsStationarySeasonalityTest(df,deployLocation='.') # stats_model,n_lags,p_value,stationary_result,stationary_combined_res=cls_ins.stationary_check(target,time_col,stationary_method) # df,decompose_result_mult,seasonality_result,seasonality_combined_res=cls_ins.seasonal_check(target,time_col,seasonal_model) # print(" Time series stationary and seasonality check completed.")
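    ## A smaller runnable sketch on synthetic data; the column names, daily frequency and
    ## weekly cycle below are illustrative assumptions only, not AION defaults.
    import numpy as np
    demo = pd.DataFrame({'DateTime': pd.date_range('2021-01-01', periods=120, freq='D'),
                         'y': np.sin(np.arange(120) * 2 * np.pi / 7) + np.arange(120) * 0.01})
    tester = tsStationarySeasonalityTest(demo, deployLocation='.')
    print(tester.stationary_check(['y'], 'DateTime', 'adfuller')[3])   # per-feature verdicts
    print(tester.seasonal_check(['y'], 'DateTime', 'additive')[2])     # per-feature verdicts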
timeseriesDLUnivariate.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import pandas as pd # import os import tensorflow as tf import numpy as np from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split import math from sklearn.metrics import mean_squared_error from tensorflow.keras.layers import Dense from tensorflow.keras.layers import Dropout from tensorflow.keras import Sequential from tensorflow.keras.layers import LSTM import logging # import kerastuner import keras_tuner #from keras_tuner.engine.hyperparameters import HyperParameters from keras_tuner.tuners import RandomSearch,BayesianOptimization ,Hyperband import warnings warnings.simplefilter("ignore", UserWarning) # from keras.models import load_model # from tensorflow.keras.optimizers import SGD # from tensorflow.keras.utils import load_model from tensorflow.keras.models import load_model class timeseriesDLUnivariate: def __init__(self,configfile,testpercentage,targetFeature,dateTimeFeature,modelName): self.look_back=None #Preprocessed dataframe # self.df=df self.savedmodelname=None self.deploy_location=None self.epochs=None self.batch_size=None self.hidden_layers=None self.optimizer=None self.activation_fn=None self.loss_fn=None self.first_layer=None self.dropout=None self.model_name=None self.hpt_train=None ##Below is model type (MLP or lstm) self.model_type=modelName #self.dataFolderLocation=str(dataFolderLocation) ##Added for ts hpt self.tuner_algorithm="" self.dl_params = configfile # self.data=data self.targetFeature=targetFeature self.dateTimeFeature=dateTimeFeature self.testpercentage = testpercentage self.log = logging.getLogger('eion') #To extract dict key,values def extract_params(self,dict): self.dict=dict for k,v in self.dict.items(): return k,v ##Get deep learning model hyperparameter from advanced config def getdlparams(self): val=self.dl_params self.log.info('-------> The given mlp/lstm timeseries algorithm parameters:>>') self.log.info(" "+str(val)) for k,v in val.items(): try: if (k == "tuner_algorithm"): self.tuner_algorithm=str(v) elif (k == "activation"): self.activation_fn=str(v) elif (k == "optimizer"): self.optimizer=str(v) elif (k == "loss"): self.loss_fn=str(v) elif (k == "first_layer"): if not isinstance(k,list): self.first_layer=str(v).split(',') else: self.first_layer=k elif (k == "lag_order"): if isinstance(k,list): k = ''.join(v) k=int(float(str(v))) else: self.look_back=int(float(str(v))) elif (k == "hidden_layers"): self.hidden_layers=int(v) elif (k == "dropout"): if not isinstance(k,list): self.dropout=str(v).split(',') else: self.dropout=k elif (k == "batch_size"): self.batch_size=int(v) elif (k == "epochs"): self.epochs=int(v) elif (k == "model_name"): self.model_name=str(v) except Exception as e: self.log.info('Exception occured in deeep learn param reading, setting up default params.') self.activation_fn="relu" self.optimizer="adam" self.loss_fn="mean_squared_error" self.first_layer=[8,512] self.hidden_layers=1 self.look_back=int(2) self.dropout=[0.1,0.5] self.batch_size=2 self.epochs=50 
self.model_name="lstmmodel.h5" continue ## Just use this if user need to create dataframe from input data. def createdf(self,df): target="" # splitting reframed to X and Y considering the first column to be out target featureX=reframed.drop(['var1(t)'],axis=1) X=df.drop([target],axis=1) Y=df[target] X_values=X.values Y_values=Y.values n_predict=len(Y_values) train_X,train_Y = X_values[:(X_values.shape[0]-n_predict),:],Y_values[:(X_values.shape[0]-n_predict)] test_X,test_Y = X_values[(X_values.shape[0]-n_predict):,:],Y_values[(X_values.shape[0]-n_predict):] #reshaping train and test to feed to LSTM train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1])) test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1])) return train_X,train_Y,test_X,test_Y # convert an array of values into a dataset matrix def numpydf(self,dataset, look_back): dataX, dataY = [], [] for i in range(len(dataset)-look_back-1): a = dataset[i:(i+look_back), 0] dataX.append(a) dataY.append(dataset[i + look_back, 0]) # x,y=numpy.array(dataX), numpy.array(dataY) return np.array(dataX), np.array(dataY) def model_save(self,model): import os.path savedmodelname=self.model_name path = os.path.join(self.deploy_location,savedmodelname) model.save(path) return (savedmodelname) ## MLP model buid def mlpDL(self,df): self.log.info("MLP timeseries learning starts.....") try: self.getdlparams() # look_back = self.look_back dataset = df.values dataset = dataset.astype('float32') ##The below Kwiatkowski-Phillips-Schmidt-Shin (kpss) statsmodel lib used for stationary check as well getting number of lags. ##number of lag calculated just for reference ,not used now. #Dont delete this, just use in future. from statsmodels.tsa.stattools import kpss statistic, p_value, n_lags, critical_values = kpss(df[self.targetFeature]) self.log.info("Based on kpss statsmodel, lag order (time steps to calculate next prediction) is: \t"+str(n_lags)) scaler = MinMaxScaler(feature_range=(0, 1)) dataset = scaler.fit_transform(dataset) # split into train and test sets train_size = int(len(dataset) * 0.80) test_size = len(dataset) - train_size train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:] self.hpt_train=train tuner_alg=self.tuner_algorithm try: ## Remove untitled_project dir in AION root folder created by previous tuner search run import shutil shutil.rmtree(r".\untitled_project") except: pass if (tuner_alg.lower()=="randomsearch"): tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3) elif (tuner_alg.lower()=="bayesianoptimization"): tuner=BayesianOptimization(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3) elif (tuner_alg.lower()=="hyperband"): tuner=Hyperband(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_epochs=50,factor=3) # tuner.search(X[...,np.new_axis],y,epochs=2,validation_data=(y[...,np.newaxis])) stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5) try: tuner.search(x=train,y=train,validation_data=(test,test),callbacks=[stop_early]) except: tuner.search(x=train,y=train,validation_split=0.2,callbacks=[stop_early]) # best_model=tuner.get_best_models(num_models=1)[0] best_hps=tuner.get_best_hyperparameters(num_trials=1)[0] best_first_layer=best_hps.get('units') best_dropout=best_hps.get('Dropout_rate') best_learning_rate=float(best_hps.get('learning_rate')) self.log.info("best hyperparameter values for mlp: \n"+str(best_hps.values)) 
look_back = 1 ## Because univariate problemtype trainX, trainY = self.numpydf(train, look_back) testX, testY = self.numpydf(test, look_back) best_hmodel=tuner.hypermodel.build(best_hps) ##Added for mlp issue,because tuner build also need to compile. try: best_hmodel.compile(loss=self.loss_fn, optimizer=self.optimizer) except: pass model_fit = best_hmodel.fit(trainX, trainY, epochs=self.epochs, batch_size=self.batch_size, verbose=2) val_acc_per_epoch = model_fit.history['loss'] best_epoch = val_acc_per_epoch.index(min(val_acc_per_epoch)) + 1 self.log.info("MLP best epochs value:\n"+str(best_epoch)) trainScore = best_hmodel.evaluate(trainX, trainY, verbose=0) testScore = best_hmodel.evaluate(testX, testY, verbose=0) #Scoring values for the model mse_eval=testScore try: #If mse_eval is list of values min_v=min(mse_eval) except: #If mse_eval is single value min_v=mse_eval rmse_eval = math.sqrt(min_v) # generate predictions for training trainPredict = best_hmodel.predict(trainX) #print(testX) testPredict = best_hmodel.predict(testX) #print(testPredict) # invert predictions, because we used mimanmax scaler trainY = scaler.inverse_transform([trainY]) trainPredict = scaler.inverse_transform(trainPredict) ## For test data testY = scaler.inverse_transform([testY]) testPredict = scaler.inverse_transform(testPredict) ## Creating dataframe for actual,predictions predictions = pd.DataFrame(testPredict, columns=[self.targetFeature+'_pred']) actual = pd.DataFrame(testY.T, columns=[self.targetFeature+'_actual']) df_predicted=pd.concat([actual,predictions],axis=1) #print(df_predicted) from math import sqrt from sklearn.metrics import mean_squared_error try: mse_mlp = mean_squared_error(testY.T,testPredict) rmse_mlp=sqrt(mse_mlp) self.log.info('mse_mlp: '+str(mse_mlp)) self.log.info('rmse_mlp: '+str(rmse_mlp)) from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error r2 = r2_score(testY.T,testPredict) mae = mean_absolute_error(testY.T,testPredict) self.log.info('r2_mlp: '+str(r2)) self.log.info('mae_mlp: '+str(mae)) except Exception as e: import traceback self.log.info("MLP dataframe creation error traceback: \n"+str(traceback.print_exc())) self.log.info(e) # df_predicted.to_csv('mlp_prediction.csv') except Exception as e: self.log.info("MLP timeseries model traceback error msg e: "+str(e)) self.log.info("MLP training successfully completed.\n") return mse_mlp,rmse_mlp,r2,mae,best_hmodel,df_predicted,look_back,scaler ## Added function for hyperparam tuning (TFSTask:7033) def build_model(self,hp): try: loss=self.loss_fn optimizer=self.optimizer try: if optimizer.lower() == "adam": optimizer=tf.keras.optimizers.Adam elif(optimizer.lower() == "adadelta"): optimizer=tf.keras.optimizers.experimental.Adadelta elif(optimizer.lower() == "nadam"): optimizer=tf.keras.optimizers.experimental.Nadam elif(optimizer.lower() == "adagrad"): optimizer=tf.keras.optimizers.experimental.Adagrad elif(optimizer.lower() == "adamax"): optimizer=tf.keras.optimizers.experimental.Adamax elif(optimizer.lower() == "rmsprop"): optimizer=tf.keras.optimizers.experimental.RMSprop elif(optimizer.lower() == "sgd"): optimizer=tf.keras.optimizers.experimental.SGD else: optimizer=tf.keras.optimizers.Adam except: optimizer=tf.keras.optimizers.Adam pass first_layer_min=round(int(self.first_layer[0])) first_layer_max=round(int(self.first_layer[1])) dropout_min=float(self.dropout[0]) dropout_max=float(self.dropout[1]) model=tf.keras.Sequential() if (self.model_type.lower() == 'lstm'): 
model.add(LSTM(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_shape=(self.look_back,self.hpt_train.shape[1]), activation=hp.Choice('dense_activation',values=['relu']))) elif (self.model_type.lower() == 'mlp'): # model.add(Dense(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_dim=(hp.Int('time_steps',min_value=look_back_min,max_value=look_back_max,step=1)), # activation='relu')) ##input_dim is 1 because mlp is for univariate. model.add(Dense(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_dim=(1),activation='relu')) model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1))) model.add(Dense(units=1)) model.compile(optimizer=optimizer(hp.Choice('learning_rate',values=[1e-1,1e-2,1e-3,1e-4])),loss=loss,metrics=[loss]) except Exception as e: import traceback self.log.info("lstm errorbuild_model traceback: \n"+str(traceback.print_exc())) return model ##LSTM timeseries function call def ts_lstm(self,df): self.log.info("lstm network model learning starts.....\n") try: self.getdlparams() dataset = df.values dataset = dataset.astype('float32') ##The below Kwiatkowski-Phillips-Schmidt-Shin (kpss) statsmodel lib used for stationary check as well getting number of lags. ##number of lag calculated just for reference ,not used now. #Dont delete this, just use in future. from statsmodels.tsa.stattools import kpss statistic, p_value, n_lags, critical_values = kpss(df[self.targetFeature]) self.log.info("Based on kpss statsmodel, lag order (time steps to calculate next prediction) is: \t"+str(n_lags)) # normalize the dataset scaler = MinMaxScaler(feature_range=(0, 1)) dataset = scaler.fit_transform(dataset) # split into train and test sets train_size = int(len(dataset) * 0.80) test_size = len(dataset) - train_size train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:] self.hpt_train=train tuner_alg=self.tuner_algorithm try: ## Remove untitled_project dir in AION root folder created by previous tuner search run import shutil shutil.rmtree(r".\untitled_project") except: pass if (tuner_alg.lower()=="randomsearch"): tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3) elif (tuner_alg.lower()=="bayesianoptimization"): tuner=BayesianOptimization(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3) elif (tuner_alg.lower()=="hyperband"): tuner=Hyperband(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_epochs=50,factor=3) # tuner.search(X[...,np.new_axis],y,epochs=2,validation_data=(y[...,np.newaxis])) from keras.callbacks import EarlyStopping stop_early = EarlyStopping(monitor='val_loss', patience=5) ##Need both x and y with same dimention. 
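            ## Illustrative note: for this univariate search the same scaled series is passed as
            ## both x and y, so the tuner only has to rank candidate architectures; the real
            ## look_back windowing is applied afterwards via numpydf() before the final fit.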
tuner.search(x=train,y=train,validation_split=0.2,callbacks=[stop_early]) # tuner.search(x=train,y=test,validation_data=(test,test),callbacks=[stop_early]) best_hps=tuner.get_best_hyperparameters(num_trials=1)[0] best_time_steps=self.look_back self.log.info("best lag order or lookback (time_steps) for LSTM: \n"+str(best_time_steps)) self.log.info("best hyperparameter values for LSTM: \n"+str(best_hps.values)) look_back = best_time_steps trainX, trainY = self.numpydf(train, look_back) testX, testY = self.numpydf(test, look_back) # reshape input to be [samples, time steps, features] trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1)) testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1)) #create and fit the LSTM network best_hmodel=tuner.hypermodel.build(best_hps) try: best_hmodel.compile(loss=self.loss_fn, optimizer=self.optimizer) except: pass model_fit = best_hmodel.fit(trainX, trainY, validation_split=0.2, epochs=self.epochs, batch_size=self.batch_size, verbose=2) val_acc_per_epoch = model_fit.history['loss'] best_epoch = val_acc_per_epoch.index(min(val_acc_per_epoch)) + 1 self.log.info("best epochs value:\n"+str(best_epoch)) # best_hmodel=tuner.hypermodel.build(best_hps) # best_hmodel.fit(x=trainX,y=trainY,validation_split=0.2,epochs=best_epoch) ##Using model_evaluate,calculate mse # mse_eval = model.evaluate(testX, testY, verbose=0) mse_eval = best_hmodel.evaluate(testX, testY, verbose=0) try: #If mse_eval is list of values min_v=min(mse_eval) except: #If mse_eval is single value min_v=mse_eval rmse_eval=math.sqrt(min_v) # self.log.info('LSTM mse:'+str(mse_eval)) # self.log.info('LSTM rmse:'+str(rmse_eval)) # lstm time series predictions trainPredict = best_hmodel.predict(trainX) testPredict = best_hmodel.predict(testX) # invert predictions, because we used mim=nmax scaler trainY = scaler.inverse_transform([trainY]) trainPredict = scaler.inverse_transform(trainPredict) testY = scaler.inverse_transform([testY]) testPredict = scaler.inverse_transform(testPredict) ## Creating dataframe for actual,predictions predictions = pd.DataFrame(testPredict, columns=[self.targetFeature+'_pred']) actual = pd.DataFrame(testY.T, columns=[self.targetFeature+'_actual']) df_predicted=pd.concat([actual,predictions],axis=1) from math import sqrt from sklearn.metrics import mean_squared_error try: mse_lstm=None mse_lstm = mean_squared_error(testY.T,testPredict) rmse_lstm=sqrt(mse_lstm) self.log.info("mse_lstm: "+str(mse_lstm)) self.log.info("rmse_lstm: "+str(rmse_lstm)) from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error r2 = r2_score(testY.T,testPredict) mae = mean_absolute_error(testY.T,testPredict) self.log.info('r2_lstm: '+str(r2)) self.log.info('mae_lstm: '+str(mae)) except Exception as e: self.log.info("lstm error loss fns"+str(e)) return 'Error',0,0,0,0,None,pd.DataFrame(),0,None except Exception as e: import traceback self.log.info("lstm training error traceback: \n"+str(traceback.print_exc())) return 'Error',0,0,0,0,None,pd.DataFrame(),0,None return 'Success',mse_lstm,rmse_lstm,r2,mae,best_hmodel,df_predicted,look_back,scaler if __name__ == '__main__': print('Inside timeseriesDLUnivariate main....\n') # tsdl_obj = timeseriesDLUnivariate() ## for testing purpose ''' df1= pd.read_csv(r"C:\aiontest\testPrograms\Data\energydemand.csv",encoding='utf-8', engine='python') dateTimeFeature = "utcTimeStamp" targetFeature="temperature" try: df1[dateTimeFeature] = pd.to_datetime(df1[dateTimeFeature]) #, format = '%d/%m/%Y %H.%M') except: pass tdata = 
df1.drop([dateTimeFeature], axis=1) tdata.index = df1[dateTimeFeature] tdata = pd.DataFrame(tdata[targetFeature]) cols = tdata.columns mse,rmse,model = tsdl_obj.mlpDL(tdata) lmse,lrmse,lstmmodel = tsdl_obj.ts_lstm(tdata) print("mlp mse: \n",mse) print("mlp rmse: \n",rmse) print("lstm mse: \n",lmse) print("lstm rmse: \n",lrmse) savedmodelname=tsdl_obj.model_save(lstmmodel) '''
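## Minimal sketch of the numpydf() windowing used above, on a toy series; the numbers are
## illustrative assumptions, not AION defaults. (Safe to run standalone.)
if __name__ == '__main__':
    import numpy as np
    series = np.arange(10, dtype='float32').reshape(-1, 1)   # shape (10, 1)
    look_back = 3
    dataX, dataY = [], []
    for i in range(len(series) - look_back - 1):
        dataX.append(series[i:(i + look_back), 0])           # window of `look_back` past values
        dataY.append(series[i + look_back, 0])               # the value that follows the window
    X, y = np.array(dataX), np.array(dataY)
    print(X.shape, y.shape)    # (6, 3) (6,)
    print(X[0], '->', y[0])    # [0. 1. 2.] -> 3.0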
aion_fbprophet.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' # For timeseries pyramid pdaarima module import json #Python sklearn & std libraries import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.feature_selection import VarianceThreshold from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error #from sklearn.metrics import mean_absolute_percentage_error from sklearn.linear_model import LinearRegression from math import sqrt import warnings # For serialization. #from sklearn.externals import joblib import pickle import os,sys # For ploting (mathlab) import matplotlib.pyplot as plt import plotly #Import eion config manager module import logging from sklearn import metrics from sklearn.metrics import accuracy_score import time import random import statsmodels.api as sm # prophet by Facebook # time series analysis #from statsmodels.tsa.seasonal import seasonal_decompose #from statsmodels.graphics.tsaplots import plot_acf, plot_pacf from prophet.plot import plot_plotly,plot_components_plotly #import seaborn as sns from sklearn.model_selection import ParameterGrid import holidays #from prophet.diagnostics import performance_metrics #from prophet.diagnostics import cross_validation from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error import logging,sys from scipy.special import inv_boxcox from prophet.diagnostics import cross_validation #from sklearn.metrics import mean_absolute_percentage_error warnings.filterwarnings("ignore") # Aion Prophet module class aion_fbprophet (): #Constructor def __init__(self,configfile,testpercentage,data,targetFeature,dateTimeFeature): try: self.tsprophet_params = configfile self.data=data self.targetFeature=targetFeature self.dateTimeFeature=dateTimeFeature self.testpercentage = testpercentage self.log = logging.getLogger('eion') except Exception as inst: self.log.info('<!------------- Prophet INIT Error ---------------> ') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) #Find datetime column def get_datetime_col(self,data): df=data dt_col=[] categorical_features=[] discrete_features=[] # Here, I am checking each column type, whether it is object type or float or int. Then I am trying to convert the # Object type to datetime format using python pd.to_datetime() function. 
If the column converts , it is datetime format, else it is some other format (categorical or discrete) for col in df.columns: if (df[col].dtype == 'object' or df[col].dtype == 'datetime64[ns]' ): try: df[col] = pd.to_datetime(df[col]) dt_col.append(col) except ValueError: categorical_features.append(col) pass elif (df[col].dtype == 'float64' or 'int64' or 'int' or 'float64' or 'float'): #('int' or 'float' or 'int64' or 'float64')): #print("discrete features found..\n") discrete_features.append(col) else: pass #Uncomment to know the datetime, categorical and continuous cols # print ("Date time colms: dt_col: \n",dt_col) # print("categorical features: \n",categorical_features) # print("continuous features: \n",discrete_features) return dt_col def get_predict_frequency(self,df,datetime_col_name): #dt_col=pd.to_datetime(df[datetime_col_name], format='%m/%d/%Y %H:%M:%S') dt_col=pd.to_datetime(df[datetime_col_name]) #df['tvalue'] = df[datetime_col_name] df['time_diff'] = (df[datetime_col_name]-df[datetime_col_name].shift()).fillna(pd.Timedelta('0')) mean_diff_dt=df['time_diff'].mean() time_diff_secs=mean_diff_dt.total_seconds() time_sec_2_hr=((time_diff_secs/60)/60) pred_freq="" time_sec_2_hr=round(time_sec_2_hr) #For abbreviation ,refer https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases if (time_sec_2_hr < 1): pred_freq="min" else: if (time_sec_2_hr >= 24): if (time_sec_2_hr > 168): if(time_sec_2_hr > 696 or time_sec_2_hr < 744): # based on 29 days, to 31 days if(time_sec_2_hr > 8760): pred_freq="Y" else: pred_freq="M" else: pred_freq="W" else: pred_freq="D" else: pred_freq="H" pass return pred_freq #To extract dict key,values def extract_params(self,dict): self.dict=dict for k,v in self.dict.items(): return k,v def mean_absolute_percentage_error(self,y_true, y_pred): if (y_true.isin([0]).sum() > 0): y_true=y_true.mask(y_true==0).fillna(y_true.mean()) try: y_true, y_pred=np.array(y_true), np.array(y_pred) #return np.mean(np.abs((y_true - y_pred) / y_true+sys.float_info.epsilon)) * 100 return np.mean(np.abs((y_true - y_pred) / y_true)) * 100 except Exception as inst: self.log.info('<------------- mean_absolute_percentage_error ---------------> ') exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def regressor_list(self,regressorstr): lst = regressorstr.split (",") reg_list=[] for i in lst: reg_list.append(i) #print(reg_list) return reg_list # def get_regressors(self,reg): # print("get extra inputs for prophet...\n") def aion_probhet(self,train_data,datetime_col_name,predicted_data_file,dataFolderLocation): from prophet import Prophet #Getting prophet params #key,val = self.extract_params(self.tsprophet_params) val=self.tsprophet_params self.log.info('-------> The given prophet algorithm parameters:>>') self.log.info(" "+str(val)) changepoint_prior_scale=[] changepoint_range=[] mcmc_samples=[] interval_width=[] holidays_prior_scale=[] n_changepoints=[] uncertainty_samples=[] seasonality_prior_scale=[] seasonality_mode="" yearly_seasonality=None weekly_seasonality=None daily_seasonality=None additional_regressors="" holiday_country_name="" holiday_years=[] no_of_periods=0 pred_frequncy="" for k,v in val.items(): try: if (k == "seasonality_mode"): seasonality_mode=v elif (k == "changepoint_prior_scale"): changepoint_prior_scale=[float(i) for i in v.split(',')] elif (k == "changepoint_range"): changepoint_range=[float(i) for i in 
v.split(',')] elif (k == "yearly_seasonality"): if v.lower() == 'true': yearly_seasonality=True elif v.lower() == 'false': yearly_seasonality=False elif v.lower() == 'auto': yearly_seasonality=v else: yearly_seasonality=True elif (k == "weekly_seasonality"): if v.lower() == 'true': weekly_seasonality=True elif v.lower() == 'false': weekly_seasonality=False elif v.lower() == 'auto': weekly_seasonality=v else: weekly_seasonality=False #weekly_seasonality=v elif (k == "daily_seasonality"): if v.lower() == 'true': daily_seasonality=True elif v.lower() == 'false': daily_seasonality=False elif v.lower() == 'auto': daily_seasonality=v else: daily_seasonality=False elif (k == "mcmc_samples"): mcmc_samples=[float(i) for i in v.split(',')] elif (k == "interval_width"): interval_width=[float(i) for i in v.split(',')] elif (k == "holidays_prior_scale"): #holidays_prior_scale=float(v) holidays_prior_scale=[float(i) for i in v.split(',')] elif (k == "n_changepoints"): n_changepoints=[int(i) for i in v.split(',')] elif (k == "uncertainty_samples"): uncertainty_samples=[float(i) for i in v.split(',')] elif (k == "seasonality_prior_scale"): seasonality_prior_scale=[float(i) for i in v.split(',')] elif (k == "additional_regressors"): additional_regressors=str(v) elif (k == "holiday_country_name"): holiday_country_name=v elif (k == "holiday_years"): holiday_years=[int(i) for i in v.split(',')] elif (k == "no_of_periods"): no_of_periods=int(v) elif (k == "pred_frequncy"): pred_frequncy=v else: self.log.info("Invalid string.") except Exception: continue try: start = time.time() datetime_col_name=str(datetime_col_name) target_col=str(self.targetFeature) #extra_regressors=additional_regressors reg_list=self.regressor_list(additional_regressors) get_dtcol="" get_dtcol=self.get_datetime_col(self.data)[0] #get predict frequency for user data pred_freq= str(self.get_predict_frequency(self.data,datetime_col_name)) if (pred_frequncy): pred_frequncy=pred_frequncy else: #If user not defined predict_freq in aion config or GUI, our algorithm will find automatically by get_predict_frequency() method pred_frequncy=pred_freq self.log.info("Auto Predict frequency period (Hour-H/Day-D/Week-W/Month-M/Year-Y): \n"+str(pred_frequncy)) #For proper datetime format check. self.data[self.dateTimeFeature] = pd.to_datetime(self.data[self.dateTimeFeature]) filterd_df = self.data.filter([get_dtcol,target_col]) holiday = pd.DataFrame([]) holiday_specified=holidays.CountryHoliday(holiday_country_name,years=holiday_years) for date, name in sorted(holiday_specified.items()): holiday = holiday.append(pd.DataFrame({'ds': date, 'holiday': "Holidays"}, index=[0]), ignore_index=True) holiday['ds'] = pd.to_datetime(holiday['ds'], format='%Y-%m-%d %H:%M:%S', errors='ignore') filterd_df=filterd_df.rename(columns={self.dateTimeFeature:'ds',target_col:'y'}) #Set seasonality model try: if not seasonality_mode: self.log.info('empty input for seasonality_mode parameter in aion configuration file.Please check. Setting default mode: additive. 
\n') seasonality_mode=[] seasonality_mode=['additive'] multiplicative_s="multiplicative" additive_s="additive" else: seasonality_mode = seasonality_mode.split(',') len_seasonality_mode=len(seasonality_mode) except ValueError as e: self.log.info(e) params_grid = {'seasonality_mode':(seasonality_mode), 'changepoint_prior_scale':changepoint_prior_scale, 'changepoint_range': changepoint_range, 'yearly_seasonality': [yearly_seasonality], 'weekly_seasonality': [weekly_seasonality], 'daily_seasonality': [daily_seasonality], 'mcmc_samples': mcmc_samples, 'interval_width': interval_width, 'holidays_prior_scale':holidays_prior_scale, 'n_changepoints' : n_changepoints, 'uncertainty_samples': uncertainty_samples, 'seasonality_prior_scale': seasonality_prior_scale} grid = ParameterGrid(params_grid) p_cnt = 0 for p in grid: p_cnt = p_cnt+1 self.log.info("--------------- Total Possible prophet iterations: --------------- \n") self.log.info(p_cnt) self.log.info("\n--------------- Modal Validation Start ---------------") size = int(len(filterd_df) * (100 - self.testpercentage)/100) train = filterd_df.loc[0:size] valid = filterd_df.loc[size:len(filterd_df)] self.log.info("------->Train Data Shape: "+str(train.shape)) self.log.info("------->Valid Data Shape"+str(valid.shape)) X_train = train X_test = valid len_test=len(X_test) #For add_regressor,copy the add_regressor columns to use. if (additional_regressors): df1=pd.DataFrame() df1[additional_regressors]=self.data[additional_regressors] model_parameters_mape = pd.DataFrame(columns = ['MAPE','Parameters']) model_parameters_rmse = pd.DataFrame(columns = ['rmse','Parameters']) model_parameters_mse = pd.DataFrame(columns = ['mse','Parameters']) model_parameters_mae = pd.DataFrame(columns = ['MAE','Parameters']) model_parameters_r2 = pd.DataFrame(columns = ['r2','Parameters']) for P in grid: pred_forecast = pd.DataFrame() random.seed(0) train_model =Prophet(changepoint_prior_scale = P['changepoint_prior_scale'], seasonality_mode=P['seasonality_mode'], changepoint_range=P['changepoint_range'], holidays_prior_scale = P['holidays_prior_scale'], n_changepoints = P['n_changepoints'], mcmc_samples=P['mcmc_samples'], interval_width=P['interval_width'], uncertainty_samples=P['uncertainty_samples'], seasonality_prior_scale= P['seasonality_prior_scale'], holidays=holiday, weekly_seasonality=P['weekly_seasonality'], daily_seasonality = P['daily_seasonality'], yearly_seasonality = P['yearly_seasonality'] ) train_forecast=pd.DataFrame() try: train_model.fit(X_train) train_forecast = train_model.make_future_dataframe(periods=len_test, freq=pred_frequncy,include_history = False) train_forecast = train_model.predict(train_forecast) except ValueError as e: self.log.info(e) self.log.info ("------->Check mcmc_samples value in aion confiuration, either 0 (default) or defined value,e.g.mcmc_samples:'300' to be set.If no idea on value, set to default.\n") pred_forecast=train_forecast[['ds','yhat']] Actual=X_test len_act=len(Actual['y']) len_pred=len(pred_forecast['yhat']) MAPE = self.mean_absolute_percentage_error(Actual['y'],abs(pred_forecast['yhat'])) model_parameters_mape = model_parameters_mape.append({'MAPE':MAPE,'Parameters':p},ignore_index=True) #MAE MAE = mean_absolute_error(Actual['y'],abs(pred_forecast['yhat'])) rmse = sqrt(mean_squared_error(Actual['y'],abs(pred_forecast['yhat']))) mse = mean_squared_error(Actual['y'],abs(pred_forecast['yhat'])) r2 = r2_score(Actual['y'],abs(pred_forecast['yhat'])) # self.log.info ("------->Prophet RMSE :"+str(rmse)) # self.log.info 
("------->Prophet MSE :"+str(mse)) # self.log.info ("------->Prophet MAE :"+str(MAE)) # self.log.info ("------->Prophet R2 :"+str(r2)) model_parameters_mape = model_parameters_mape.append({'MAPE':MAPE,'Parameters':p},ignore_index=True) model_parameters_rmse = model_parameters_rmse.append({'rmse':rmse,'Parameters':p},ignore_index=True) model_parameters_mse = model_parameters_mse.append({'mse':mse,'Parameters':p},ignore_index=True) model_parameters_mae = model_parameters_mae.append({'MAE':MAE,'Parameters':p},ignore_index=True) model_parameters_r2 = model_parameters_r2.append({'r2':r2,'Parameters':p},ignore_index=True) #end of for loop parameters_mape = model_parameters_mape.sort_values(by=['MAPE']) parameters_mape = parameters_mape.reset_index(drop=True) best_params_mape=parameters_mape['Parameters'][0] # print("Best Parameters on which the model has the least MAPE is: \n",best_params_mape) best_mape_score=parameters_mape['MAPE'].iloc[0] #self.log.info('------->Mean absolute percent error log: \n ') #self.log.info('------->best_mape_score: \n '+str(best_mape_score)) parameters_rmse = model_parameters_rmse.sort_values(by=['rmse']) parameters_rmse = parameters_rmse.reset_index(drop=True) best_params_rmse=parameters_rmse['Parameters'][0] best_rmse_score=parameters_rmse['rmse'].iloc[0] #self.log.info('------->Root Man Squared Error log (Prophet timeseries): \n ') #self.log.info('------->best_rmse_score ((Prophet timeseries)): \n '+str(best_rmse_score)) #mse parameters_mse = model_parameters_mse.sort_values(by=['mse']) parameters_mse = parameters_mse.reset_index(drop=True) best_params_mse = parameters_mse['Parameters'][0] best_mse_score=parameters_mse['mse'].iloc[0] #MAE parameters_mae = model_parameters_mae.sort_values(by=['MAE']) parameters_mae = parameters_mae.reset_index(drop=True) best_params_mae = parameters_mae['Parameters'][0] best_mae_score=parameters_mae['MAE'].iloc[0] # R2 score parameters_r2 = model_parameters_r2.sort_values(by=['r2']) parameters_r2 = parameters_r2.reset_index(drop=False) best_params_r2 = parameters_r2['Parameters'][0] best_r2_score=parameters_r2['r2'].iloc[0] #Final best prophet mse,rmse,mape scores # self.log.info ("------->Prophet RMSE :"+str(best_rmse_score)) # self.log.info ("------->Prophet MSE :"+str(best_mse_score)) # self.log.info ("------->Prophet MAE :"+str(best_mae_score)) # self.log.info ("------->Prophet R2 :"+str(best_r2_score)) #Extracting best model parameters for k,v in best_params_mape.items(): try: if (k == "changepoint_prior_scale"): changepoint_prior_scale=float(v) elif (k == "changepoint_range"): changepoint_range=float(v) elif (k == "daily_seasonality"): daily_seasonality=v elif (k == "holidays_prior_scale"): holidays_prior_scale=float(v) elif (k == "interval_width"): interval_width=float(v) elif (k == "mcmc_samples"): mcmc_samples=float(v) elif (k == "n_changepoints"): n_changepoints=int(v) elif (k == "seasonality_mode"): seasonality_mode=str(v) elif (k == "seasonality_prior_scale"): seasonality_prior_scale=int(v) elif (k == "uncertainty_samples"): uncertainty_samples=float(v) elif (k == "weekly_seasonality"): weekly_seasonality=v elif (k == "yearly_seasonality"): yearly_seasonality=v else: pass except Exception as e: self.log.info("\n prophet time series config param parsing error"+str(e)) #continue self.log.info("\n Best prophet model accuracy parameters.\n ") #Prophet model based on mape best params. 
best_prophet_model = Prophet(holidays=holiday, changepoint_prior_scale= changepoint_prior_scale, holidays_prior_scale = holidays_prior_scale, n_changepoints = n_changepoints, seasonality_mode = seasonality_mode, weekly_seasonality= weekly_seasonality, daily_seasonality = daily_seasonality, yearly_seasonality = yearly_seasonality, interval_width=interval_width, mcmc_samples=mcmc_samples, changepoint_range=changepoint_range) # If holiday not set using prophet model,we can add as below. # best_prophet_model.add_country_holidays(country_name=holiday_country_name) #prophet add_regressor ,adding additional influencer (regressor) features, but it different from multivariant model. if (additional_regressors): filterd_df[additional_regressors] = df1[additional_regressors] filterd_df.reset_index(drop=True) for v in reg_list: best_prophet_model=best_prophet_model.add_regressor(v) #best_prophet_model.fit(X_train) else: pass #Model prophet fit, it should be done before make_future_dataframe best_prophet_model.fit(filterd_df) future = best_prophet_model.make_future_dataframe(periods=no_of_periods, freq=pred_frequncy,include_history = False) if (additional_regressors): future[additional_regressors] = filterd_df[additional_regressors] future.reset_index(drop=True) future=future.dropna() else: pass #Final prediction forecast = best_prophet_model.predict(future) # forecast_df=forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']] # #Save forecast as csv file # forecast_df.to_csv(r"prophet_realtime_user_steps.csv",index = False, header=True) #Plot the predition and save in file forecast_plot = best_prophet_model.plot(forecast) imagefilename = os.path.join(dataFolderLocation,'log','img','prophet_fig.png') forecast_plot.savefig(imagefilename) #The below part is used to compare prophet predicted with actual value #For train data #Prophet model with train and test data, based on mape best params. 
            best_prophet_model_new = Prophet(holidays=holiday,
                                             changepoint_prior_scale=changepoint_prior_scale,
                                             holidays_prior_scale=holidays_prior_scale,
                                             n_changepoints=n_changepoints,
                                             seasonality_mode=seasonality_mode,
                                             weekly_seasonality=weekly_seasonality,
                                             daily_seasonality=daily_seasonality,
                                             yearly_seasonality=yearly_seasonality,
                                             interval_width=interval_width,
                                             mcmc_samples=mcmc_samples,
                                             changepoint_range=changepoint_range)
            fp_forecast = pd.DataFrame()
            try:
                best_prophet_model_new.fit(X_train)
                fp_forecast = best_prophet_model_new.make_future_dataframe(periods=len_test, freq=pred_frequncy, include_history=False)
                fp_forecast = best_prophet_model_new.predict(fp_forecast)
            except ValueError as e:
                self.log.info(e)
                self.log.info("------->Check the mcmc_samples value in the aion configuration: it must be 0 (default) or a defined value, e.g. mcmc_samples:'300'. If unsure, keep the default.\n")
            pred_forecast = fp_forecast[['ds', 'yhat']]
            pred_forecast['ds'] = Actual['ds'].to_numpy()
            Actual.ds = pd.to_datetime(Actual.ds)
            pred_forecast.ds = pd.to_datetime(pred_forecast.ds)
            MAE = mean_absolute_error(Actual['y'], abs(pred_forecast['yhat']))
            rmse = sqrt(mean_squared_error(Actual['y'], abs(pred_forecast['yhat'])))
            mse = mean_squared_error(Actual['y'], abs(pred_forecast['yhat']))
            r2 = r2_score(Actual['y'], abs(pred_forecast['yhat']))
            MAPE = self.mean_absolute_percentage_error(Actual['y'], abs(pred_forecast['yhat']))
            # Final best prophet mse, rmse, mape scores
            self.log.info("------->Prophet RMSE : " + str(rmse))
            self.log.info("------->Prophet MSE : " + str(mse))
            self.log.info("------->Prophet MAE : " + str(MAE))
            self.log.info("------->Prophet R2 : " + str(r2))
            self.log.info("------->Prophet MAPE: " + str(MAPE))
            # self.log.info(MAPE)
            # self.log.info('------->best_mape_score: \n '+str(best_mape_score))
            prophet_df = pd.merge(Actual, pred_forecast, on=['ds'], how='left')
            cols = ['ds', 'y', 'yhat']
            prophet_df_new = prophet_df[cols]
            prophet_df_new.dropna(inplace=True)
            actualfeature = target_col + '_actual'
            predictfeature = target_col + '_pred'
            prophet_df_new = prophet_df_new.rename(columns={'ds': 'datetime', 'y': actualfeature, 'yhat': predictfeature})
            # prophet_df_new.to_csv(predicted_data_file)
            # cv_results = cross_validation(model=best_prophet_model, initial=pd.to_timedelta(no_of_periods, unit=pred_frequncy), horizon=pd.to_timedelta(no_of_periods, unit=pred_frequncy))
            # forecast_df = forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]
            # Save forecast as csv file
            # forecast_df.to_csv(r"prophet_realtime_Output.csv", index=False, header=True)
            # self.log.info('------->Prophet time series forecast (last 7 predictions for user view): \n ')
            # self.log.info(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(7))
            plot_prd = plot_plotly(best_prophet_model, forecast)
            imagefilename = os.path.join(dataFolderLocation, 'log', 'img', '1_ppm_plot')
            plotly.offline.plot(plot_prd, filename=imagefilename, auto_open=False)
            plot_prd_components = plot_components_plotly(best_prophet_model, forecast)
            imagefilename = os.path.join(dataFolderLocation, 'log', 'img', '2_ppm_plot')
            plotly.offline.plot(plot_prd_components, filename=imagefilename, auto_open=False)
            executionTime = (time.time() - start)
            self.log.info('-------> Time: ' + str(executionTime))
            return best_prophet_model, best_mae_score, best_rmse_score, best_mse_score, best_mape_score, best_r2_score, pred_frequncy, additional_regressors, prophet_df_new
        except Exception as inst:
            # print("********** aion_fbprophet exception ************* \n")
            self.log.info('<!------------- Prophet Execute Error ---------------> ' + str(inst))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            # os.path.split returns (head, tail); take the tail to log just the file name.
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
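# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal, self-contained version of the grid-search pattern above: fit one
# Prophet model per parameter combination, score it by MAPE on a hold-out
# window, and keep the combination with the lowest error. All names, data, and
# grid values below are placeholders, not AION's defaults.
import numpy as np
import pandas as pd
from prophet import Prophet  # 'from fbprophet import Prophet' on older installs
from sklearn.model_selection import ParameterGrid

def _mape(y_true, y_pred):
    # Mean absolute percentage error, in percent.
    y_true, y_pred = np.asarray(y_true, dtype=float), np.asarray(y_pred, dtype=float)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100

if __name__ == '__main__':
    rng = np.random.RandomState(0)
    # Toy daily series: a noisy linear trend (y starts at 1 to keep MAPE finite).
    df = pd.DataFrame({'ds': pd.date_range('2023-01-01', periods=100, freq='D'),
                       'y': np.arange(1, 101) + rng.normal(0, 2, 100)})
    train, test = df.iloc[:80], df.iloc[80:]
    results = []
    for p in ParameterGrid({'changepoint_prior_scale': [0.05, 0.5],
                            'seasonality_mode': ['additive', 'multiplicative']}):
        m = Prophet(**p).fit(train)
        future = m.make_future_dataframe(periods=len(test), freq='D', include_history=False)
        results.append({'MAPE': _mape(test['y'], m.predict(future)['yhat']), 'Parameters': p})
    # Lowest MAPE wins, mirroring the best_params_mape selection above.
    best = min(results, key=lambda r: r['MAPE'])
    print('Best params by MAPE:', best['Parameters'], round(best['MAPE'], 2))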
generic_feature_statistics_generator_test.py
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator import numpy as np import pandas as pd from tensorflow.python.platform import googletest class GenericFeatureStatisticsGeneratorTest(googletest.TestCase): def setUp(self): self.gfsg = GenericFeatureStatisticsGenerator() def testProtoFromDataFrames(self): data = [[1, 'hi'], [2, 'hello'], [3, 'hi']] df = pd.DataFrame(data, columns=['testFeatureInt', 'testFeatureString']) dataframes = [{'table': df, 'name': 'testDataset'}] p = self.gfsg.ProtoFromDataFrames(dataframes) self.assertEqual(1, len(p.datasets)) test_data = p.datasets[0] self.assertEqual('testDataset', test_data.name) self.assertEqual(3, test_data.num_examples) self.assertEqual(2, len(test_data.features)) if test_data.features[0].name == 'testFeatureInt': numfeat = test_data.features[0] stringfeat = test_data.features[1] else: numfeat = test_data.features[1] stringfeat = test_data.features[0] self.assertEqual('testFeatureInt', numfeat.name) self.assertEqual(self.gfsg.fs_proto.INT, numfeat.type) self.assertEqual(1, numfeat.num_stats.min) self.assertEqual(3, numfeat.num_stats.max) self.assertEqual('testFeatureString', stringfeat.name) self.assertEqual(self.gfsg.fs_proto.STRING, stringfeat.type) self.assertEqual(2, stringfeat.string_stats.unique) def testNdarrayToEntry(self): arr = np.array([1.0, 2.0, None, float('nan'), 3.0], dtype=float) entry = self.gfsg.NdarrayToEntry(arr) self.assertEqual(2, entry['missing']) arr = np.array(['a', 'b', float('nan'), 'c'], dtype=str) entry = self.gfsg.NdarrayToEntry(arr) self.assertEqual(1, entry['missing']) def testNdarrayToEntryTimeTypes(self): arr = np.array( [np.datetime64('2005-02-25'), np.datetime64('2006-02-25')], dtype=np.datetime64) entry = self.gfsg.NdarrayToEntry(arr) self.assertEqual([1109289600000000000, 1140825600000000000], entry['vals']) arr = np.array( [np.datetime64('2009-01-01') - np.datetime64('2008-01-01')], dtype=np.timedelta64) entry = self.gfsg.NdarrayToEntry(arr) self.assertEqual([31622400000000000], entry['vals']) def testDTypeToType(self): self.assertEqual(self.gfsg.fs_proto.INT, self.gfsg.DtypeToType(np.dtype(np.int32))) # Boolean and time types treated as int self.assertEqual(self.gfsg.fs_proto.INT, self.gfsg.DtypeToType(np.dtype(np.bool))) self.assertEqual(self.gfsg.fs_proto.INT, self.gfsg.DtypeToType(np.dtype(np.datetime64))) self.assertEqual(self.gfsg.fs_proto.INT, self.gfsg.DtypeToType(np.dtype(np.timedelta64))) self.assertEqual(self.gfsg.fs_proto.FLOAT, self.gfsg.DtypeToType(np.dtype(np.float32))) self.assertEqual(self.gfsg.fs_proto.STRING, self.gfsg.DtypeToType(np.dtype(np.str))) # Unsupported types treated as string for now self.assertEqual(self.gfsg.fs_proto.STRING, self.gfsg.DtypeToType(np.dtype(np.void))) def testGetDatasetsProtoFromEntriesLists(self): entries = {} entries['testFeature'] = { 'vals': [1, 
2, 3], 'counts': [1, 1, 1], 'missing': 0, 'type': self.gfsg.fs_proto.INT } datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}] p = self.gfsg.GetDatasetsProto(datasets) self.assertEqual(1, len(p.datasets)) test_data = p.datasets[0] self.assertEqual('testDataset', test_data.name) self.assertEqual(3, test_data.num_examples) self.assertEqual(1, len(test_data.features)) numfeat = test_data.features[0] self.assertEqual('testFeature', numfeat.name) self.assertEqual(self.gfsg.fs_proto.INT, numfeat.type) self.assertEqual(1, numfeat.num_stats.min) self.assertEqual(3, numfeat.num_stats.max) hist = numfeat.num_stats.common_stats.num_values_histogram buckets = hist.buckets self.assertEqual(self.gfsg.histogram_proto.QUANTILES, hist.type) self.assertEqual(10, len(buckets)) self.assertEqual(1, buckets[0].low_value) self.assertEqual(1, buckets[0].high_value) self.assertEqual(.3, buckets[0].sample_count) self.assertEqual(1, buckets[9].low_value) self.assertEqual(1, buckets[9].high_value) self.assertEqual(.3, buckets[9].sample_count) def testGetDatasetsProtoSequenceExampleHistogram(self): entries = {} entries['testFeature'] = { 'vals': [1, 2, 2, 3], 'counts': [1, 2, 1], 'feat_lens': [1, 2, 1], 'missing': 0, 'type': self.gfsg.fs_proto.INT } datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}] p = self.gfsg.GetDatasetsProto(datasets) hist = p.datasets[0].features[ 0].num_stats.common_stats.feature_list_length_histogram buckets = hist.buckets self.assertEqual(self.gfsg.histogram_proto.QUANTILES, hist.type) self.assertEqual(10, len(buckets)) self.assertEqual(1, buckets[0].low_value) self.assertEqual(1, buckets[0].high_value) self.assertEqual(.3, buckets[0].sample_count) self.assertEqual(1.8, buckets[9].low_value) self.assertEqual(2, buckets[9].high_value) self.assertEqual(.3, buckets[9].sample_count) def testGetDatasetsProtoWithWhitelist(self): entries = {} entries['testFeature'] = { 'vals': [1, 2, 3], 'counts': [1, 1, 1], 'missing': 0, 'type': self.gfsg.fs_proto.INT } entries['ignoreFeature'] = { 'vals': [5, 6], 'counts': [1, 1], 'missing': 1, 'type': self.gfsg.fs_proto.INT } datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}] p = self.gfsg.GetDatasetsProto(datasets, features=['testFeature']) self.assertEqual(1, len(p.datasets)) test_data = p.datasets[0] self.assertEqual('testDataset', test_data.name) self.assertEqual(3, test_data.num_examples) self.assertEqual(1, len(test_data.features)) numfeat = test_data.features[0] self.assertEqual('testFeature', numfeat.name) self.assertEqual(1, numfeat.num_stats.min) def testGetDatasetsProtoWithMaxHistigramLevelsCount(self): # Selected entries' lengths make it easy to compute average length data = [['hi'], ['good'], ['hi'], ['hi'], ['a'], ['a']] df = pd.DataFrame(data, columns=['testFeatureString']) dataframes = [{'table': df, 'name': 'testDataset'}] # Getting proto from ProtoFromDataFrames instead of GetDatasetsProto # directly to avoid any hand written values ex: size of dataset. 
    p = self.gfsg.ProtoFromDataFrames(
        dataframes, histogram_categorical_levels_count=2)
    self.assertEqual(1, len(p.datasets))
    test_data = p.datasets[0]
    self.assertEqual('testDataset', test_data.name)
    self.assertEqual(6, test_data.num_examples)
    self.assertEqual(1, len(test_data.features))
    numfeat = test_data.features[0]
    self.assertEqual('testFeatureString', numfeat.name)
    top_values = numfeat.string_stats.top_values
    self.assertEqual(3, top_values[0].frequency)
    self.assertEqual('hi', top_values[0].value)
    self.assertEqual(3, numfeat.string_stats.unique)
    self.assertEqual(2, numfeat.string_stats.avg_length)
    rank_hist = numfeat.string_stats.rank_histogram
    buckets = rank_hist.buckets
    self.assertEqual(2, len(buckets))
    self.assertEqual('hi', buckets[0].label)
    self.assertEqual(3, buckets[0].sample_count)
    self.assertEqual('a', buckets[1].label)
    self.assertEqual(2, buckets[1].sample_count)


if __name__ == '__main__':
  googletest.main()
generic_feature_statistics_generator.py
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for generating the feature_statistics proto from generic data.

The proto is used as input for the Overview visualization.
"""

from facets_overview.base_generic_feature_statistics_generator import BaseGenericFeatureStatisticsGenerator
import facets_overview.feature_statistics_pb2 as fs


class GenericFeatureStatisticsGenerator(BaseGenericFeatureStatisticsGenerator):
  """Generator of stats proto from generic data."""

  def __init__(self):
    BaseGenericFeatureStatisticsGenerator.__init__(
        self, fs.FeatureNameStatistics, fs.DatasetFeatureStatisticsList,
        fs.Histogram)
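# --- Illustrative usage sketch (not part of the original module) -------------
# Building the stats proto from a pandas DataFrame, mirroring the call pattern
# exercised in generic_feature_statistics_generator_test.py. The data below is
# a placeholder.
if __name__ == '__main__':
  import pandas as pd
  df = pd.DataFrame({'age': [12, 35, 57], 'city': ['a', 'b', 'a']})
  gfsg = GenericFeatureStatisticsGenerator()
  # Each dict needs a 'table' (the DataFrame) and a 'name' for the dataset.
  proto = gfsg.ProtoFromDataFrames([{'table': df, 'name': 'demo'}])
  print(proto.datasets[0].num_examples)  # -> 3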
__init__.py
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
feature_statistics_generator_test.py
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from facets_overview.feature_statistics_generator import FeatureStatisticsGenerator import numpy as np import tensorflow as tf from tensorflow.python.platform import googletest class FeatureStatisticsGeneratorTest(googletest.TestCase): def setUp(self): self.fs = FeatureStatisticsGenerator() def testParseExampleInt(self): # Tests parsing examples of integers examples = [] for i in range(50): example = tf.train.Example() example.features.feature['num'].int64_list.value.append(i) examples.append(example) entries = {} for i, example in enumerate(examples): self.fs._ParseExample(example.features.feature, [], entries, i) self.assertEqual(1, len(entries)) self.assertIn('num', entries) info = entries['num'] self.assertEqual(0, info['missing']) self.assertEqual(self.fs.fs_proto.INT, info['type']) for i in range(len(examples)): self.assertEqual(1, info['counts'][i]) self.assertEqual(i, info['vals'][i]) def testParseExampleMissingValueList(self): # Tests parsing examples of integers examples = [] example = tf.train.Example() # pylint: disable=pointless-statement example.features.feature['str'] # pylint: enable=pointless-statement examples.append(example) example = tf.train.Example() example.features.feature['str'].bytes_list.value.append(b'test') examples.append(example) entries = {} for i, example in enumerate(examples): self.fs._ParseExample(example.features.feature, [], entries, i) self.assertEqual(1, len(entries)) self.assertIn('str', entries) info = entries['str'] self.assertEqual(1, info['missing']) self.assertEqual(self.fs.fs_proto.STRING, info['type']) self.assertEqual(0, info['counts'][0]) self.assertEqual(1, info['counts'][1]) def _check_sequence_example_entries(self, entries, n_examples, n_features, feat_len=None): self.assertIn('num', entries) info = entries['num'] self.assertEqual(0, info['missing']) self.assertEqual(self.fs.fs_proto.INT, info['type']) for i in range(n_examples): self.assertEqual(n_features, info['counts'][i]) if feat_len is not None: self.assertEqual(feat_len, info['feat_lens'][i]) for i in range(n_examples * n_features): self.assertEqual(i, info['vals'][i]) if feat_len is None: self.assertEqual(0, len(info['feat_lens'])) def testParseExampleSequenceContext(self): # Tests parsing examples of integers in context field examples = [] for i in range(50): example = tf.train.SequenceExample() example.context.feature['num'].int64_list.value.append(i) examples.append(example) entries = {} for i, example in enumerate(examples): self.fs._ParseExample(example.context.feature, example.feature_lists.feature_list, entries, i) self._check_sequence_example_entries(entries, 50, 1) self.assertEqual(1, len(entries)) def testParseExampleSequenceFeatureList(self): examples = [] for i in range(50): example = tf.train.SequenceExample() feat = example.feature_lists.feature_list['num'].feature.add() feat.int64_list.value.append(i) 
examples.append(example) entries = {} for i, example in enumerate(examples): self.fs._ParseExample(example.context.feature, example.feature_lists.feature_list, entries, i) self._check_sequence_example_entries(entries, 50, 1, 1) def testParseExampleSequenceFeatureListMultipleEntriesInner(self): examples = [] for i in range(2): example = tf.train.SequenceExample() feat = example.feature_lists.feature_list['num'].feature.add() for j in range(25): feat.int64_list.value.append(i * 25 + j) examples.append(example) entries = {} for i, example in enumerate(examples): self.fs._ParseExample(example.context.feature, example.feature_lists.feature_list, entries, i) self._check_sequence_example_entries(entries, 2, 25, 1) def testParseExampleSequenceFeatureListMultipleEntriesOuter(self): # Tests parsing examples of integers in context field examples = [] for i in range(2): example = tf.train.SequenceExample() for j in range(25): feat = example.feature_lists.feature_list['num'].feature.add() feat.int64_list.value.append(i * 25 + j) examples.append(example) entries = {} for i, example in enumerate(examples): self.fs._ParseExample(example.context.feature, example.feature_lists.feature_list, entries, i) self._check_sequence_example_entries(entries, 2, 25, 25) def testVaryingCountsAndMissing(self): # Tests parsing examples of when some examples have missing features examples = [] for i in range(5): example = tf.train.Example() example.features.feature['other'].int64_list.value.append(0) for _ in range(i): example.features.feature['num'].int64_list.value.append(i) examples.append(example) example = tf.train.Example() example.features.feature['other'].int64_list.value.append(0) examples.append(example) entries = {} for i, example in enumerate(examples): self.fs._ParseExample(example.features.feature, [], entries, i) info = entries['num'] self.assertEqual(2, info['missing']) self.assertEqual(4, len(info['counts'])) for i in range(4): self.assertEqual(i + 1, info['counts'][i]) self.assertEqual(10, len(info['vals'])) def testParseExampleStringsAndFloats(self): # Tests parsing examples of string and float features examples = [] for i in range(50): example = tf.train.Example() example.features.feature['str'].bytes_list.value.append(b'hi') example.features.feature['float'].float_list.value.append(i) examples.append(example) entries = {} for i, example in enumerate(examples): self.fs._ParseExample(example.features.feature, [], entries, i) self.assertEqual(2, len(entries)) self.assertEqual(self.fs.fs_proto.FLOAT, entries['float']['type']) self.assertEqual(self.fs.fs_proto.STRING, entries['str']['type']) for i in range(len(examples)): self.assertEqual(1, entries['str']['counts'][i]) self.assertEqual(1, entries['float']['counts'][i]) self.assertEqual(i, entries['float']['vals'][i]) self.assertEqual('hi', entries['str']['vals'][i].decode( 'UTF-8', 'strict')) def testParseExamplesTypeMismatch(self): examples = [] example = tf.train.Example() example.features.feature['feat'].int64_list.value.append(0) examples.append(example) example = tf.train.Example() example.features.feature['feat'].bytes_list.value.append(b'str') examples.append(example) entries = {} self.fs._ParseExample(examples[0].features.feature, [], entries, 0) with self.assertRaises(TypeError): self.fs._ParseExample(examples[1].features.feature, [], entries, 1) def testGetDatasetsProtoFromEntriesLists(self): entries = {} entries['testFeature'] = { 'vals': [1, 2, 3], 'counts': [1, 1, 1], 'missing': 0, 'type': self.fs.fs_proto.INT } datasets = [{'entries': 
entries, 'size': 3, 'name': 'testDataset'}] p = self.fs.GetDatasetsProto(datasets) self.assertEqual(1, len(p.datasets)) test_data = p.datasets[0] self.assertEqual('testDataset', test_data.name) self.assertEqual(3, test_data.num_examples) self.assertEqual(1, len(test_data.features)) numfeat = test_data.features[0] self.assertEqual('testFeature', numfeat.name) self.assertEqual(self.fs.fs_proto.INT, numfeat.type) self.assertEqual(1, numfeat.num_stats.min) self.assertEqual(3, numfeat.num_stats.max) def testGetProtoNums(self): # Tests converting int examples into the feature stats proto examples = [] for i in range(50): example = tf.train.Example() example.features.feature['num'].int64_list.value.append(i) examples.append(example) example = tf.train.Example() example.features.feature['other'].int64_list.value.append(0) examples.append(example) entries = {} for i, example in enumerate(examples): self.fs._ParseExample(example.features.feature, [], entries, i) datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}] p = self.fs.GetDatasetsProto(datasets) self.assertEqual(1, len(p.datasets)) test_data = p.datasets[0] self.assertEqual('test', test_data.name) self.assertEqual(51, test_data.num_examples) numfeat = test_data.features[0] if ( test_data.features[0].name == 'num') else test_data.features[1] self.assertEqual('num', numfeat.name) self.assertEqual(self.fs.fs_proto.INT, numfeat.type) self.assertEqual(0, numfeat.num_stats.min) self.assertEqual(49, numfeat.num_stats.max) self.assertEqual(24.5, numfeat.num_stats.mean) self.assertEqual(24.5, numfeat.num_stats.median) self.assertEqual(1, numfeat.num_stats.num_zeros) self.assertAlmostEqual(14.430869689, numfeat.num_stats.std_dev, 4) self.assertEqual(1, numfeat.num_stats.common_stats.num_missing) self.assertEqual(50, numfeat.num_stats.common_stats.num_non_missing) self.assertEqual(1, numfeat.num_stats.common_stats.min_num_values) self.assertEqual(1, numfeat.num_stats.common_stats.max_num_values) self.assertAlmostEqual(1, numfeat.num_stats.common_stats.avg_num_values, 4) hist = numfeat.num_stats.common_stats.num_values_histogram buckets = hist.buckets self.assertEqual(self.fs.histogram_proto.QUANTILES, hist.type) self.assertEqual(10, len(buckets)) self.assertEqual(1, buckets[0].low_value) self.assertEqual(1, buckets[0].high_value) self.assertEqual(5, buckets[0].sample_count) self.assertEqual(1, buckets[9].low_value) self.assertEqual(1, buckets[9].high_value) self.assertEqual(5, buckets[9].sample_count) self.assertEqual(2, len(numfeat.num_stats.histograms)) buckets = numfeat.num_stats.histograms[0].buckets self.assertEqual(self.fs.histogram_proto.STANDARD, numfeat.num_stats.histograms[0].type) self.assertEqual(10, len(buckets)) self.assertEqual(0, buckets[0].low_value) self.assertEqual(4.9, buckets[0].high_value) self.assertEqual(5, buckets[0].sample_count) self.assertAlmostEqual(44.1, buckets[9].low_value) self.assertEqual(49, buckets[9].high_value) self.assertEqual(5, buckets[9].sample_count) buckets = numfeat.num_stats.histograms[1].buckets self.assertEqual(self.fs.histogram_proto.QUANTILES, numfeat.num_stats.histograms[1].type) self.assertEqual(10, len(buckets)) self.assertEqual(0, buckets[0].low_value) self.assertEqual(4.9, buckets[0].high_value) self.assertEqual(5, buckets[0].sample_count) self.assertAlmostEqual(44.1, buckets[9].low_value) self.assertEqual(49, buckets[9].high_value) self.assertEqual(5, buckets[9].sample_count) def testQuantiles(self): examples = [] for i in range(50): example = tf.train.Example() 
example.features.feature['num'].int64_list.value.append(i) examples.append(example) for i in range(50): example = tf.train.Example() example.features.feature['num'].int64_list.value.append(100) examples.append(example) entries = {} for i, example in enumerate(examples): self.fs._ParseExample(example.features.feature, [], entries, i) datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}] p = self.fs.GetDatasetsProto(datasets) numfeat = p.datasets[0].features[0] self.assertEqual(2, len(numfeat.num_stats.histograms)) self.assertEqual(self.fs.histogram_proto.QUANTILES, numfeat.num_stats.histograms[1].type) buckets = numfeat.num_stats.histograms[1].buckets self.assertEqual(10, len(buckets)) self.assertEqual(0, buckets[0].low_value) self.assertEqual(9.9, buckets[0].high_value) self.assertEqual(10, buckets[0].sample_count) self.assertEqual(100, buckets[9].low_value) self.assertEqual(100, buckets[9].high_value) self.assertEqual(10, buckets[9].sample_count) def testInfinityAndNan(self): examples = [] for i in range(50): example = tf.train.Example() example.features.feature['num'].float_list.value.append(i) examples.append(example) example = tf.train.Example() example.features.feature['num'].float_list.value.append(float('inf')) examples.append(example) example = tf.train.Example() example.features.feature['num'].float_list.value.append(float('-inf')) examples.append(example) example = tf.train.Example() example.features.feature['num'].float_list.value.append(float('nan')) examples.append(example) entries = {} for i, example in enumerate(examples): self.fs._ParseExample(example.features.feature, [], entries, i) datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}] p = self.fs.GetDatasetsProto(datasets) numfeat = p.datasets[0].features[0] self.assertEqual('num', numfeat.name) self.assertEqual(self.fs.fs_proto.FLOAT, numfeat.type) self.assertTrue(np.isnan(numfeat.num_stats.min)) self.assertTrue(np.isnan(numfeat.num_stats.max)) self.assertTrue(np.isnan(numfeat.num_stats.mean)) self.assertTrue(np.isnan(numfeat.num_stats.median)) self.assertEqual(1, numfeat.num_stats.num_zeros) self.assertTrue(np.isnan(numfeat.num_stats.std_dev)) self.assertEqual(53, numfeat.num_stats.common_stats.num_non_missing) hist = buckets = numfeat.num_stats.histograms[0] buckets = hist.buckets self.assertEqual(self.fs.histogram_proto.STANDARD, hist.type) self.assertEqual(1, hist.num_nan) self.assertEqual(10, len(buckets)) self.assertEqual(float('-inf'), buckets[0].low_value) self.assertEqual(4.9, buckets[0].high_value) self.assertEqual(6, buckets[0].sample_count) self.assertEqual(44.1, buckets[9].low_value) self.assertEqual(float('inf'), buckets[9].high_value) self.assertEqual(6, buckets[9].sample_count) def testInfinitysOnly(self): examples = [] example = tf.train.Example() example.features.feature['num'].float_list.value.append(float('inf')) examples.append(example) example = tf.train.Example() example.features.feature['num'].float_list.value.append(float('-inf')) examples.append(example) entries = {} for i, example in enumerate(examples): self.fs._ParseExample(example.features.feature, [], entries, i) datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}] p = self.fs.GetDatasetsProto(datasets) numfeat = p.datasets[0].features[0] hist = buckets = numfeat.num_stats.histograms[0] buckets = hist.buckets self.assertEqual(self.fs.histogram_proto.STANDARD, hist.type) self.assertEqual(10, len(buckets)) self.assertEqual(float('-inf'), buckets[0].low_value) self.assertEqual(0.1, 
buckets[0].high_value) self.assertEqual(1, buckets[0].sample_count) self.assertEqual(0.9, buckets[9].low_value) self.assertEqual(float('inf'), buckets[9].high_value) self.assertEqual(1, buckets[9].sample_count) def testGetProtoStrings(self): # Tests converting string examples into the feature stats proto examples = [] for i in range(2): example = tf.train.Example() example.features.feature['str'].bytes_list.value.append(b'hello') examples.append(example) for i in range(3): example = tf.train.Example() example.features.feature['str'].bytes_list.value.append(b'hi') examples.append(example) example = tf.train.Example() example.features.feature['str'].bytes_list.value.append(b'hey') examples.append(example) entries = {} for i, example in enumerate(examples): self.fs._ParseExample(example.features.feature, [], entries, i) datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}] p = self.fs.GetDatasetsProto(datasets) self.assertEqual(1, len(p.datasets)) test_data = p.datasets[0] self.assertEqual('test', test_data.name) self.assertEqual(6, test_data.num_examples) strfeat = test_data.features[0] self.assertEqual('str', strfeat.name) self.assertEqual(self.fs.fs_proto.STRING, strfeat.type) self.assertEqual(3, strfeat.string_stats.unique) self.assertAlmostEqual(19 / 6.0, strfeat.string_stats.avg_length, 4) self.assertEqual(0, strfeat.string_stats.common_stats.num_missing) self.assertEqual(6, strfeat.string_stats.common_stats.num_non_missing) self.assertEqual(1, strfeat.string_stats.common_stats.min_num_values) self.assertEqual(1, strfeat.string_stats.common_stats.max_num_values) self.assertEqual(1, strfeat.string_stats.common_stats.avg_num_values) hist = strfeat.string_stats.common_stats.num_values_histogram buckets = hist.buckets self.assertEqual(self.fs.histogram_proto.QUANTILES, hist.type) self.assertEqual(10, len(buckets)) self.assertEqual(1, buckets[0].low_value) self.assertEqual(1, buckets[0].high_value) self.assertEqual(.6, buckets[0].sample_count) self.assertEqual(1, buckets[9].low_value) self.assertEqual(1, buckets[9].high_value) self.assertEqual(.6, buckets[9].sample_count) self.assertEqual(2, len(strfeat.string_stats.top_values)) self.assertEqual(3, strfeat.string_stats.top_values[0].frequency) self.assertEqual('hi', strfeat.string_stats.top_values[0].value) self.assertEqual(2, strfeat.string_stats.top_values[1].frequency) self.assertEqual('hello', strfeat.string_stats.top_values[1].value) buckets = strfeat.string_stats.rank_histogram.buckets self.assertEqual(3, len(buckets)) self.assertEqual(0, buckets[0].low_rank) self.assertEqual(0, buckets[0].high_rank) self.assertEqual(3, buckets[0].sample_count) self.assertEqual('hi', buckets[0].label) self.assertEqual(2, buckets[2].low_rank) self.assertEqual(2, buckets[2].high_rank) self.assertEqual(1, buckets[2].sample_count) self.assertEqual('hey', buckets[2].label) def testGetProtoMultipleDatasets(self): # Tests converting multiple datsets into the feature stats proto # including ensuring feature order is consistent in the protos. 
examples1 = [] for i in range(2): example = tf.train.Example() example.features.feature['str'].bytes_list.value.append(b'one') example.features.feature['num'].int64_list.value.append(0) examples1.append(example) examples2 = [] example = tf.train.Example() example.features.feature['num'].int64_list.value.append(1) example.features.feature['str'].bytes_list.value.append(b'two') examples2.append(example) entries1 = {} for i, example1 in enumerate(examples1): self.fs._ParseExample(example1.features.feature, [], entries1, i) entries2 = {} for i, example2 in enumerate(examples2): self.fs._ParseExample(example2.features.feature, [], entries2, i) datasets = [{ 'entries': entries1, 'size': len(examples1), 'name': 'test1' }, { 'entries': entries2, 'size': len(examples2), 'name': 'test2' }] p = self.fs.GetDatasetsProto(datasets) self.assertEqual(2, len(p.datasets)) test_data_1 = p.datasets[0] self.assertEqual('test1', test_data_1.name) self.assertEqual(2, test_data_1.num_examples) num_feat_index = 0 if test_data_1.features[0].name == 'num' else 1 self.assertEqual(0, test_data_1.features[num_feat_index].num_stats.max) test_data_2 = p.datasets[1] self.assertEqual('test2', test_data_2.name) self.assertEqual(1, test_data_2.num_examples) self.assertEqual(1, test_data_2.features[num_feat_index].num_stats.max) def testGetEntriesNoFiles(self): features, num_examples = self.fs._GetEntries(['test'], 10, lambda unused_path: []) self.assertEqual(0, num_examples) self.assertEqual({}, features) @staticmethod def get_example_iter(): def ex_iter(unused_filename): examples = [] for i in range(50): example = tf.train.Example() example.features.feature['num'].int64_list.value.append(i) examples.append(example.SerializeToString()) return examples return ex_iter def testGetEntries_one(self): features, num_examples = self.fs._GetEntries(['test'], 1, self.get_example_iter()) self.assertEqual(1, num_examples) self.assertTrue('num' in features) def testGetEntries_oneFile(self): unused_features, num_examples = self.fs._GetEntries(['test'], 1000, self.get_example_iter()) self.assertEqual(50, num_examples) def testGetEntries_twoFiles(self): unused_features, num_examples = self.fs._GetEntries(['test0', 'test1'], 1000, self.get_example_iter()) self.assertEqual(100, num_examples) def testGetEntries_stopInSecondFile(self): unused_features, num_examples = self.fs._GetEntries([ 'test@0', 'test@1', 'test@2', 'test@3', 'test@4', 'test@5', 'test@6', 'test@7', 'test@8', 'test@9' ], 75, self.get_example_iter()) self.assertEqual(75, num_examples) if __name__ == '__main__': googletest.main()
feature_statistics_generator.py
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for generating the feature_statistics proto.

The proto is used as input for the Overview visualization.
"""

from facets_overview.base_feature_statistics_generator import BaseFeatureStatisticsGenerator
import facets_overview.feature_statistics_pb2 as fs


class FeatureStatisticsGenerator(BaseFeatureStatisticsGenerator):
  """Generator of stats proto from TF data."""

  def __init__(self):
    BaseFeatureStatisticsGenerator.__init__(self, fs.FeatureNameStatistics,
                                            fs.DatasetFeatureStatisticsList,
                                            fs.Histogram)
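# --- Illustrative usage sketch (not part of the original module) -------------
# Building the stats proto from TFRecord files, following the
# ProtoFromTfRecordFiles contract documented in the base class: a list of
# dicts with 'path' and 'name' keys. The path below is a placeholder.
if __name__ == '__main__':
  fsg = FeatureStatisticsGenerator()
  proto = fsg.ProtoFromTfRecordFiles(
      [{'path': 'data/train.tfrecord', 'name': 'train'}], max_entries=10000)
  print([d.name for d in proto.datasets])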
base_generic_feature_statistics_generator.py
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base class for generating the feature_statistics proto from generic data. The proto is used as input for the Overview visualization. """ import numpy as np import pandas as pd import sys class BaseGenericFeatureStatisticsGenerator(object): """Base class for generator of stats proto from generic data.""" def __init__(self, fs_proto, datasets_proto, histogram_proto): self.fs_proto = fs_proto self.datasets_proto = datasets_proto self.histogram_proto = histogram_proto def ProtoFromDataFrames(self, dataframes, histogram_categorical_levels_count=None): """Creates a feature statistics proto from a set of pandas dataframes. Args: dataframes: A list of dicts describing tables for each dataset for the proto. Each entry contains a 'table' field of the dataframe of the data and a 'name' field to identify the dataset in the proto. histogram_categorical_levels_count: int, controls the maximum number of levels to display in histograms for categorical features. Useful to prevent codes/IDs features from bloating the stats object. Defaults to None. Returns: The feature statistics proto for the provided tables. """ datasets = [] for dataframe in dataframes: table = dataframe['table'] table_entries = {} for col in table: table_entries[col] = self.NdarrayToEntry(table[col]) datasets.append({ 'entries': table_entries, 'size': len(table), 'name': dataframe['name'] }) return self.GetDatasetsProto( datasets, histogram_categorical_levels_count=histogram_categorical_levels_count) def DtypeToType(self, dtype): """Converts a Numpy dtype to the FeatureNameStatistics.Type proto enum.""" if dtype.char in np.typecodes['AllFloat']: return self.fs_proto.FLOAT elif (dtype.char in np.typecodes['AllInteger'] or dtype == bool or np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64)): return self.fs_proto.INT else: return self.fs_proto.STRING def DtypeToNumberConverter(self, dtype): """Converts a Numpy dtype to a converter method if applicable. The converter method takes in a numpy array of objects of the provided dtype and returns a numpy array of the numbers backing that object for statistical analysis. Returns None if no converter is necessary. Args: dtype: The numpy dtype to make a converter for. Returns: The converter method or None. 
""" if np.issubdtype(dtype, np.datetime64): def DatetimesToNumbers(dt_list): return np.array([pd.Timestamp(dt).value for dt in dt_list]) return DatetimesToNumbers elif np.issubdtype(dtype, np.timedelta64): def TimedetlasToNumbers(td_list): return np.array([pd.Timedelta(td).value for td in td_list]) return TimedetlasToNumbers else: return None def NdarrayToEntry(self, x): """Converts an ndarray to the Entry format.""" row_counts = [] for row in x: try: rc = np.count_nonzero(~np.isnan(row)) if rc != 0: row_counts.append(rc) except TypeError: try: row_counts.append(row.size) except AttributeError: row_counts.append(1) data_type = self.DtypeToType(x.dtype) converter = self.DtypeToNumberConverter(x.dtype) flattened = x.ravel() orig_size = len(flattened) # Remove all None and nan values and count how many were removed. flattened = flattened[flattened != np.array(None)] if converter: flattened = converter(flattened) if data_type == self.fs_proto.STRING: flattened_temp = [] for x in flattened: try: if str(x) != 'nan': flattened_temp.append(x) except UnicodeEncodeError: if x.encode('utf-8') != 'nan': flattened_temp.append(x) flattened = flattened_temp else: flattened = flattened[~np.isnan(flattened)].tolist() missing = orig_size - len(flattened) return { 'vals': flattened, 'counts': row_counts, 'missing': missing, 'type': data_type } def GetDatasetsProto(self, datasets, features=None, histogram_categorical_levels_count=None): """Generates the feature stats proto from dictionaries of feature values. Args: datasets: An array of dictionaries, one per dataset, each one containing: - 'entries': The dictionary of features in the dataset from the parsed examples. - 'size': The number of examples parsed for the dataset. - 'name': The name of the dataset. features: A list of strings that is a whitelist of feature names to create feature statistics for. If set to None then all features in the dataset are analyzed. Defaults to None. histogram_categorical_levels_count: int, controls the maximum number of levels to display in histograms for categorical features. Useful to prevent codes/IDs features from bloating the stats object. Defaults to None. Returns: The feature statistics proto for the provided datasets. """ features_seen = set() whitelist_features = set(features) if features else None all_datasets = self.datasets_proto() # TODO(jwexler): Add ability to generate weighted feature stats # if there is a specified weight feature in the dataset. # Initialize each dataset for dataset in datasets: all_datasets.datasets.add( name=dataset['name'], num_examples=dataset['size']) # This outer loop ensures that for each feature seen in any of the provided # datasets, we check the feature once against all datasets. for outer_dataset in datasets: for key, value in outer_dataset['entries'].items(): # If we have a feature whitelist and this feature is not in the # whitelist then do not process it. # If we have processed this feature already, no need to do it again. if ((whitelist_features and key not in whitelist_features) or key in features_seen): continue features_seen.add(key) # Default to type int if no type is found, so that the fact that all # values are missing from this feature can be displayed. feature_type = value['type'] if 'type' in value else self.fs_proto.INT # Process the found feature for each dataset. 
for j, dataset in enumerate(datasets): feat = all_datasets.datasets[j].features.add( type=feature_type, name=key.encode('utf-8')) value = dataset['entries'].get(key) has_data = value is not None and (value['vals'].size != 0 if isinstance( value['vals'], np.ndarray) else value['vals']) commonstats = None # For numeric features, calculate numeric statistics. if feat.type in (self.fs_proto.INT, self.fs_proto.FLOAT): featstats = feat.num_stats commonstats = featstats.common_stats if has_data: nums = value['vals'] featstats.std_dev = np.std(nums).item() featstats.mean = np.mean(nums).item() featstats.min = np.min(nums).item() featstats.max = np.max(nums).item() featstats.median = np.median(nums).item() featstats.num_zeros = len(nums) - np.count_nonzero(nums) nums = np.array(nums) num_nan = len(nums[np.isnan(nums)]) num_posinf = len(nums[np.isposinf(nums)]) num_neginf = len(nums[np.isneginf(nums)]) # Remove all non-finite (including NaN) values from the numeric # values in order to calculate histogram buckets/counts. The # inf values will be added back to the first and last buckets. nums = nums[np.isfinite(nums)] counts, buckets = np.histogram(nums) hist = featstats.histograms.add() hist.type = self.histogram_proto.STANDARD hist.num_nan = num_nan for bucket_count in range(len(counts)): bucket = hist.buckets.add( low_value=buckets[bucket_count], high_value=buckets[bucket_count + 1], sample_count=counts[bucket_count].item()) # Add any negative or positive infinities to the first and last # buckets in the histogram. if bucket_count == 0 and num_neginf > 0: bucket.low_value = float('-inf') bucket.sample_count += num_neginf elif bucket_count == len(counts) - 1 and num_posinf > 0: bucket.high_value = float('inf') bucket.sample_count += num_posinf if not hist.buckets: if num_neginf: hist.buckets.add( low_value=float('-inf'), high_value=float('-inf'), sample_count=num_neginf) if num_posinf: hist.buckets.add( low_value=float('inf'), high_value=float('inf'), sample_count=num_posinf) self._PopulateQuantilesHistogram(featstats.histograms.add(),nums.tolist()) elif feat.type == self.fs_proto.STRING: featstats = feat.string_stats commonstats = featstats.common_stats if has_data: strs = [] for item in value['vals']: strs.append(item if hasattr(item, '__len__') else item.encode('utf-8') if hasattr(item, 'encode') else str( item)) featstats.avg_length = np.mean(np.vectorize(len)(strs)) vals, counts = np.unique(strs, return_counts=True) featstats.unique = len(vals) sorted_vals = sorted(zip(counts, vals), reverse=True) sorted_vals = sorted_vals[:histogram_categorical_levels_count] for val_index, val in enumerate(sorted_vals): try: if (sys.version_info.major < 3 or isinstance(val[1], (bytes, bytearray))): printable_val = val[1].decode('UTF-8', 'strict') else: printable_val = val[1] except (UnicodeDecodeError, UnicodeEncodeError): printable_val = '__BYTES_VALUE__' bucket = featstats.rank_histogram.buckets.add( low_rank=val_index, high_rank=val_index, sample_count=(val[0].item()), label=printable_val) if val_index < 2: featstats.top_values.add( value=bucket.label, frequency=bucket.sample_count) # Add the common stats regardless of the feature type. 
if has_data: commonstats.num_missing = value['missing'] commonstats.num_non_missing = (all_datasets.datasets[j].num_examples - featstats.common_stats.num_missing) commonstats.min_num_values = int(np.min(value['counts']).astype(int)) commonstats.max_num_values = int(np.max(value['counts']).astype(int)) commonstats.avg_num_values = np.mean(value['counts']) if 'feat_lens' in value and value['feat_lens']: self._PopulateQuantilesHistogram( commonstats.feature_list_length_histogram, value['feat_lens']) self._PopulateQuantilesHistogram(commonstats.num_values_histogram, value['counts']) else: commonstats.num_non_missing = 0 commonstats.num_missing = all_datasets.datasets[j].num_examples return all_datasets def _PopulateQuantilesHistogram(self, hist, nums): """Fills in the histogram with quantile information from the provided array. Args: hist: A Histogram proto message to fill in. nums: A list of numbers to create a quantiles histogram from. """ if not nums: return num_quantile_buckets = 10 quantiles_to_get = [ x * 100 / num_quantile_buckets for x in range(num_quantile_buckets + 1) ] try: quantiles = np.percentile(nums, quantiles_to_get) except: quantiles = [0.0] hist.type = self.histogram_proto.QUANTILES quantiles_sample_count = float(len(nums)) / num_quantile_buckets for low, high in zip(quantiles, quantiles[1:]): hist.buckets.add( low_value=low, high_value=high, sample_count=quantiles_sample_count)
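# --- Illustrative worked example (not part of the original module) -----------
# The decile math behind _PopulateQuantilesHistogram above: ten QUANTILES
# buckets whose edges are the 0th..100th percentiles of the values and whose
# sample_count is len(nums) / 10 for every bucket.
if __name__ == '__main__':
  import numpy as np
  nums = list(range(50))
  edges = np.percentile(nums, [x * 100 / 10 for x in range(11)])  # 0, 10, ..., 100
  sample_count = len(nums) / 10.0  # 5.0 values per decile bucket here
  for low, high in zip(edges, edges[1:]):
    print('bucket [%5.1f, %5.1f] sample_count=%.1f' % (low, high, sample_count))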
base_feature_statistics_generator.py
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base class for generating the feature_statistics proto from TensorFlow data. The proto is used as input for the Overview visualization. """ from functools import partial from facets_overview.base_generic_feature_statistics_generator import BaseGenericFeatureStatisticsGenerator import tensorflow as tf # The feature name used to track sequence length when analyzing # tf.SequenceExamples. SEQUENCE_LENGTH_FEATURE_NAME = 'sequence length (derived feature)' class BaseFeatureStatisticsGenerator(BaseGenericFeatureStatisticsGenerator): """Base class for generator of stats proto from TF data.""" def __init__(self, fs_proto, datasets_proto, histogram_proto): BaseGenericFeatureStatisticsGenerator.__init__( self, fs_proto, datasets_proto, histogram_proto) def ProtoFromTfRecordFiles(self, files, max_entries=10000, features=None, is_sequence=False, iterator_options=None, histogram_categorical_levels_count=None): """Creates a feature statistics proto from a set of TFRecord files. Args: files: A list of dicts describing files for each dataset for the proto. Each entry contains a 'path' field with the path to the TFRecord file on disk and a 'name' field to identify the dataset in the proto. max_entries: The maximum number of examples to load from each dataset in order to create the proto. Defaults to 10000. features: A list of strings that is a whitelist of feature names to create feature statistics for. If set to None then all features in the dataset are analyzed. Defaults to None. is_sequence: True if the input data from 'tables' are tf.SequenceExamples, False if tf.Examples. Defaults to false. iterator_options: Options to pass to the iterator that reads the examples. Defaults to None. histogram_categorical_levels_count: int, controls the maximum number of levels to display in histograms for categorical features. Useful to prevent codes/IDs features from bloating the stats object. Defaults to None. Returns: The feature statistics proto for the provided files. """ datasets = [] for entry in files: entries, size = self._GetTfRecordEntries(entry['path'], max_entries, is_sequence, iterator_options) datasets.append({'entries': entries, 'size': size, 'name': entry['name']}) return self.GetDatasetsProto( datasets, features, histogram_categorical_levels_count) def _ParseExample(self, example_features, example_feature_lists, entries, index): """Parses data from an example, populating a dictionary of feature values. Args: example_features: A map of strings to tf.Features from the example. example_feature_lists: A map of strings to tf.FeatureLists from the example. entries: A dictionary of all features parsed thus far and arrays of their values. This is mutated by the function. index: The index of the example to parse from a list of examples. Raises: TypeError: Raises an exception when a feature has inconsistent types across examples. 
""" features_seen = set() for feature_list, is_feature in zip( [example_features, example_feature_lists], [True, False]): sequence_length = None for feature_name in feature_list: # If this feature has not been seen in previous examples, then # initialize its entry into the entries dictionary. if feature_name not in entries: entries[feature_name] = { 'vals': [], 'counts': [], 'feat_lens': [], 'missing': index } feature_entry = entries[feature_name] feature = feature_list[feature_name] value_type = None value_list = [] if is_feature: # If parsing a tf.Feature, extract the type and values simply. if feature.HasField('float_list'): value_list = feature.float_list.value value_type = self.fs_proto.FLOAT elif feature.HasField('bytes_list'): value_list = feature.bytes_list.value value_type = self.fs_proto.STRING elif feature.HasField('int64_list'): value_list = feature.int64_list.value value_type = self.fs_proto.INT else: # If parsing a tf.FeatureList, get the type and values by iterating # over all Features in the FeatureList. sequence_length = len(feature.feature) if sequence_length != 0 and feature.feature[0].HasField('float_list'): for feat in feature.feature: for value in feat.float_list.value: value_list.append(value) value_type = self.fs_proto.FLOAT elif sequence_length != 0 and feature.feature[0].HasField( 'bytes_list'): for feat in feature.feature: for value in feat.bytes_list.value: value_list.append(value) value_type = self.fs_proto.STRING elif sequence_length != 0 and feature.feature[0].HasField( 'int64_list'): for feat in feature.feature: for value in feat.int64_list.value: value_list.append(value) value_type = self.fs_proto.INT if value_type is not None: if 'type' not in feature_entry: feature_entry['type'] = value_type elif feature_entry['type'] != value_type: raise TypeError('type mismatch for feature ' + feature_name) feature_entry['counts'].append(len(value_list)) feature_entry['vals'].extend(value_list) if sequence_length is not None: feature_entry['feat_lens'].append(sequence_length) if value_list: features_seen.add(feature_name) # For all previously-seen features not found in this example, update the # feature's missing value. for f in entries: fv = entries[f] if f not in features_seen: fv['missing'] += 1 def _GetEntries(self, paths, max_entries, iterator_from_file, is_sequence=False): """Extracts examples into a dictionary of feature values. Args: paths: A list of the paths to the files to parse. max_entries: The maximum number of examples to load. iterator_from_file: A method that takes a file path string and returns an iterator to the examples in that file. is_sequence: True if the input data from 'iterator_from_file' are tf.SequenceExamples, False if tf.Examples. Defaults to false. Returns: A tuple with two elements: - A dictionary of all features parsed thus far and arrays of their values. - The number of examples parsed. """ entries = {} index = 0 for filepath in paths: reader = iterator_from_file(filepath) for record in reader: if is_sequence: sequence_example = tf.train.SequenceExample.FromString(record) self._ParseExample(sequence_example.context.feature, sequence_example.feature_lists.feature_list, entries, index) else: self._ParseExample( tf.train.Example.FromString(record).features.feature, [], entries, index) index += 1 if index == max_entries: return entries, index return entries, index def _GetTfRecordEntries(self, path, max_entries, is_sequence, iterator_options): """Extracts TFRecord examples into a dictionary of feature values. 
Args: path: The path to the TFRecord file(s). max_entries: The maximum number of examples to load. is_sequence: True if the input data from 'path' are tf.SequenceExamples, False if tf.Examples. Defaults to false. iterator_options: Options to pass to the iterator that reads the examples. Defaults to None. Returns: A tuple with two elements: - A dictionary of all features parsed thus far and arrays of their values. - The number of examples parsed. """ return self._GetEntries([path], max_entries, partial( tf.compat.v1.io.tf_record_iterator, options=iterator_options), is_sequence)
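# --- Illustrative usage sketch (not part of the original module) -------------
# Passing iterator_options through to the TFRecord reader, e.g. for
# GZIP-compressed record files. The file path is a placeholder; verify that
# your TensorFlow version exposes TFRecordOptions and tf_record_iterator as
# assumed here.
if __name__ == '__main__':
  opts = tf.io.TFRecordOptions(compression_type='GZIP')
  for record in tf.compat.v1.io.tf_record_iterator('data.tfrecord.gz', options=opts):
    print(tf.train.Example.FromString(record))
    break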
feature_statistics_pb2.py
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # Generated by the protocol buffer compiler. DO NOT EDIT! # source: feature_statistics.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='feature_statistics.proto', package='featureStatistics', syntax='proto3', serialized_pb=_b('\n\x18\x66\x65\x61ture_statistics.proto\x12\x11\x66\x65\x61tureStatistics\"]\n\x1c\x44\x61tasetFeatureStatisticsList\x12=\n\x08\x64\x61tasets\x18\x01 \x03(\x0b\x32+.featureStatistics.DatasetFeatureStatistics\"\x99\x01\n\x18\x44\x61tasetFeatureStatistics\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0cnum_examples\x18\x02 \x01(\x04\x12\x1d\n\x15weighted_num_examples\x18\x04 \x01(\x01\x12:\n\x08\x66\x65\x61tures\x18\x03 \x03(\x0b\x32(.featureStatistics.FeatureNameStatistics\"\x8b\x03\n\x15\x46\x65\x61tureNameStatistics\x12\x0c\n\x04name\x18\x01 \x01(\t\x12;\n\x04type\x18\x02 \x01(\x0e\x32-.featureStatistics.FeatureNameStatistics.Type\x12\x39\n\tnum_stats\x18\x03 \x01(\x0b\x32$.featureStatistics.NumericStatisticsH\x00\x12;\n\x0cstring_stats\x18\x04 \x01(\x0b\x32#.featureStatistics.StringStatisticsH\x00\x12\x39\n\x0b\x62ytes_stats\x18\x05 \x01(\x0b\x32\".featureStatistics.BytesStatisticsH\x00\x12\x38\n\x0c\x63ustom_stats\x18\x06 \x03(\x0b\x32\".featureStatistics.CustomStatistic\"1\n\x04Type\x12\x07\n\x03INT\x10\x00\x12\t\n\x05\x46LOAT\x10\x01\x12\n\n\x06STRING\x10\x02\x12\t\n\x05\x42YTES\x10\x03\x42\x07\n\x05stats\"x\n\x18WeightedCommonStatistics\x12\x17\n\x0fnum_non_missing\x18\x01 \x01(\x01\x12\x13\n\x0bnum_missing\x18\x02 \x01(\x01\x12\x16\n\x0e\x61vg_num_values\x18\x03 \x01(\x01\x12\x16\n\x0etot_num_values\x18\x04 \x01(\x01\"w\n\x0f\x43ustomStatistic\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x03num\x18\x02 \x01(\x01H\x00\x12\r\n\x03str\x18\x03 \x01(\tH\x00\x12\x31\n\thistogram\x18\x04 \x01(\x0b\x32\x1c.featureStatistics.HistogramH\x00\x42\x05\n\x03val\"\xaa\x02\n\x11NumericStatistics\x12\x39\n\x0c\x63ommon_stats\x18\x01 \x01(\x0b\x32#.featureStatistics.CommonStatistics\x12\x0c\n\x04mean\x18\x02 \x01(\x01\x12\x0f\n\x07std_dev\x18\x03 \x01(\x01\x12\x11\n\tnum_zeros\x18\x04 \x01(\x04\x12\x0b\n\x03min\x18\x05 \x01(\x01\x12\x0e\n\x06median\x18\x06 \x01(\x01\x12\x0b\n\x03max\x18\x07 \x01(\x01\x12\x30\n\nhistograms\x18\x08 \x03(\x0b\x32\x1c.featureStatistics.Histogram\x12L\n\x16weighted_numeric_stats\x18\t \x01(\x0b\x32,.featureStatistics.WeightedNumericStatistics\"\x8c\x03\n\x10StringStatistics\x12\x39\n\x0c\x63ommon_stats\x18\x01 
\x01(\x0b\x32#.featureStatistics.CommonStatistics\x12\x0e\n\x06unique\x18\x02 \x01(\x04\x12\x44\n\ntop_values\x18\x03 \x03(\x0b\x32\x30.featureStatistics.StringStatistics.FreqAndValue\x12\x12\n\navg_length\x18\x04 \x01(\x02\x12\x38\n\x0erank_histogram\x18\x05 \x01(\x0b\x32 .featureStatistics.RankHistogram\x12J\n\x15weighted_string_stats\x18\x06 \x01(\x0b\x32+.featureStatistics.WeightedStringStatistics\x1aM\n\x0c\x46reqAndValue\x12\x1b\n\x0f\x64\x65precated_freq\x18\x01 \x01(\x04\x42\x02\x18\x01\x12\r\n\x05value\x18\x02 \x01(\t\x12\x11\n\tfrequency\x18\x03 \x01(\x01\"|\n\x19WeightedNumericStatistics\x12\x0c\n\x04mean\x18\x01 \x01(\x01\x12\x0f\n\x07std_dev\x18\x02 \x01(\x01\x12\x0e\n\x06median\x18\x03 \x01(\x01\x12\x30\n\nhistograms\x18\x04 \x03(\x0b\x32\x1c.featureStatistics.Histogram\"\x9a\x01\n\x18WeightedStringStatistics\x12\x44\n\ntop_values\x18\x01 \x03(\x0b\x32\x30.featureStatistics.StringStatistics.FreqAndValue\x12\x38\n\x0erank_histogram\x18\x02 \x01(\x0b\x32 .featureStatistics.RankHistogram\"\xa1\x01\n\x0f\x42ytesStatistics\x12\x39\n\x0c\x63ommon_stats\x18\x01 \x01(\x0b\x32#.featureStatistics.CommonStatistics\x12\x0e\n\x06unique\x18\x02 \x01(\x04\x12\x15\n\ravg_num_bytes\x18\x03 \x01(\x02\x12\x15\n\rmin_num_bytes\x18\x04 \x01(\x02\x12\x15\n\rmax_num_bytes\x18\x05 \x01(\x02\"\xed\x02\n\x10\x43ommonStatistics\x12\x17\n\x0fnum_non_missing\x18\x01 \x01(\x04\x12\x13\n\x0bnum_missing\x18\x02 \x01(\x04\x12\x16\n\x0emin_num_values\x18\x03 \x01(\x04\x12\x16\n\x0emax_num_values\x18\x04 \x01(\x04\x12\x16\n\x0e\x61vg_num_values\x18\x05 \x01(\x02\x12\x16\n\x0etot_num_values\x18\x08 \x01(\x04\x12:\n\x14num_values_histogram\x18\x06 \x01(\x0b\x32\x1c.featureStatistics.Histogram\x12J\n\x15weighted_common_stats\x18\x07 \x01(\x0b\x32+.featureStatistics.WeightedCommonStatistics\x12\x43\n\x1d\x66\x65\x61ture_list_length_histogram\x18\t \x01(\x0b\x32\x1c.featureStatistics.Histogram\"\xc4\x02\n\tHistogram\x12\x0f\n\x07num_nan\x18\x01 \x01(\x04\x12\x15\n\rnum_undefined\x18\x02 \x01(\x04\x12\x34\n\x07\x62uckets\x18\x03 \x03(\x0b\x32#.featureStatistics.Histogram.Bucket\x12\x38\n\x04type\x18\x04 \x01(\x0e\x32*.featureStatistics.Histogram.HistogramType\x12\x0c\n\x04name\x18\x05 \x01(\t\x1a\x63\n\x06\x42ucket\x12\x11\n\tlow_value\x18\x01 \x01(\x01\x12\x12\n\nhigh_value\x18\x02 \x01(\x01\x12\x1c\n\x10\x64\x65precated_count\x18\x03 \x01(\x04\x42\x02\x18\x01\x12\x14\n\x0csample_count\x18\x04 \x01(\x01\",\n\rHistogramType\x12\x0c\n\x08STANDARD\x10\x00\x12\r\n\tQUANTILES\x10\x01\"\xc9\x01\n\rRankHistogram\x12\x38\n\x07\x62uckets\x18\x01 \x03(\x0b\x32\'.featureStatistics.RankHistogram.Bucket\x12\x0c\n\x04name\x18\x02 \x01(\t\x1ap\n\x06\x42ucket\x12\x10\n\x08low_rank\x18\x01 \x01(\x04\x12\x11\n\thigh_rank\x18\x02 \x01(\x04\x12\x1c\n\x10\x64\x65precated_count\x18\x03 \x01(\x04\x42\x02\x18\x01\x12\r\n\x05label\x18\x04 \x01(\t\x12\x14\n\x0csample_count\x18\x05 \x01(\x01\x62\x06proto3') ) _FEATURENAMESTATISTICS_TYPE = _descriptor.EnumDescriptor( name='Type', full_name='featureStatistics.FeatureNameStatistics.Type', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='INT', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='FLOAT', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='STRING', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='BYTES', index=3, number=3, options=None, type=None), ], containing_type=None, options=None, serialized_start=636, serialized_end=685, ) 
_sym_db.RegisterEnumDescriptor(_FEATURENAMESTATISTICS_TYPE) _HISTOGRAM_HISTOGRAMTYPE = _descriptor.EnumDescriptor( name='HistogramType', full_name='featureStatistics.Histogram.HistogramType', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='STANDARD', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='QUANTILES', index=1, number=1, options=None, type=None), ], containing_type=None, options=None, serialized_start=2735, serialized_end=2779, ) _sym_db.RegisterEnumDescriptor(_HISTOGRAM_HISTOGRAMTYPE) _DATASETFEATURESTATISTICSLIST = _descriptor.Descriptor( name='DatasetFeatureStatisticsList', full_name='featureStatistics.DatasetFeatureStatisticsList', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='datasets', full_name='featureStatistics.DatasetFeatureStatisticsList.datasets', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=47, serialized_end=140, ) _DATASETFEATURESTATISTICS = _descriptor.Descriptor( name='DatasetFeatureStatistics', full_name='featureStatistics.DatasetFeatureStatistics', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='featureStatistics.DatasetFeatureStatistics.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='num_examples', full_name='featureStatistics.DatasetFeatureStatistics.num_examples', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='weighted_num_examples', full_name='featureStatistics.DatasetFeatureStatistics.weighted_num_examples', index=2, number=4, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='features', full_name='featureStatistics.DatasetFeatureStatistics.features', index=3, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=143, serialized_end=296, ) _FEATURENAMESTATISTICS = _descriptor.Descriptor( name='FeatureNameStatistics', full_name='featureStatistics.FeatureNameStatistics', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='featureStatistics.FeatureNameStatistics.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='type', 
full_name='featureStatistics.FeatureNameStatistics.type', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='num_stats', full_name='featureStatistics.FeatureNameStatistics.num_stats', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='string_stats', full_name='featureStatistics.FeatureNameStatistics.string_stats', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='bytes_stats', full_name='featureStatistics.FeatureNameStatistics.bytes_stats', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='custom_stats', full_name='featureStatistics.FeatureNameStatistics.custom_stats', index=5, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _FEATURENAMESTATISTICS_TYPE, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='stats', full_name='featureStatistics.FeatureNameStatistics.stats', index=0, containing_type=None, fields=[]), ], serialized_start=299, serialized_end=694, ) _WEIGHTEDCOMMONSTATISTICS = _descriptor.Descriptor( name='WeightedCommonStatistics', full_name='featureStatistics.WeightedCommonStatistics', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='num_non_missing', full_name='featureStatistics.WeightedCommonStatistics.num_non_missing', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='num_missing', full_name='featureStatistics.WeightedCommonStatistics.num_missing', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='avg_num_values', full_name='featureStatistics.WeightedCommonStatistics.avg_num_values', index=2, number=3, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='tot_num_values', full_name='featureStatistics.WeightedCommonStatistics.tot_num_values', index=3, number=4, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', 
extension_ranges=[], oneofs=[ ], serialized_start=696, serialized_end=816, ) _CUSTOMSTATISTIC = _descriptor.Descriptor( name='CustomStatistic', full_name='featureStatistics.CustomStatistic', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='featureStatistics.CustomStatistic.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='num', full_name='featureStatistics.CustomStatistic.num', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='str', full_name='featureStatistics.CustomStatistic.str', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='histogram', full_name='featureStatistics.CustomStatistic.histogram', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='val', full_name='featureStatistics.CustomStatistic.val', index=0, containing_type=None, fields=[]), ], serialized_start=818, serialized_end=937, ) _NUMERICSTATISTICS = _descriptor.Descriptor( name='NumericStatistics', full_name='featureStatistics.NumericStatistics', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='common_stats', full_name='featureStatistics.NumericStatistics.common_stats', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='mean', full_name='featureStatistics.NumericStatistics.mean', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='std_dev', full_name='featureStatistics.NumericStatistics.std_dev', index=2, number=3, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='num_zeros', full_name='featureStatistics.NumericStatistics.num_zeros', index=3, number=4, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='min', full_name='featureStatistics.NumericStatistics.min', index=4, number=5, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
options=None), _descriptor.FieldDescriptor( name='median', full_name='featureStatistics.NumericStatistics.median', index=5, number=6, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='max', full_name='featureStatistics.NumericStatistics.max', index=6, number=7, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='histograms', full_name='featureStatistics.NumericStatistics.histograms', index=7, number=8, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='weighted_numeric_stats', full_name='featureStatistics.NumericStatistics.weighted_numeric_stats', index=8, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=940, serialized_end=1238, ) _STRINGSTATISTICS_FREQANDVALUE = _descriptor.Descriptor( name='FreqAndValue', full_name='featureStatistics.StringStatistics.FreqAndValue', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='deprecated_freq', full_name='featureStatistics.StringStatistics.FreqAndValue.deprecated_freq', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))), _descriptor.FieldDescriptor( name='value', full_name='featureStatistics.StringStatistics.FreqAndValue.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='frequency', full_name='featureStatistics.StringStatistics.FreqAndValue.frequency', index=2, number=3, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1560, serialized_end=1637, ) _STRINGSTATISTICS = _descriptor.Descriptor( name='StringStatistics', full_name='featureStatistics.StringStatistics', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='common_stats', full_name='featureStatistics.StringStatistics.common_stats', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='unique', full_name='featureStatistics.StringStatistics.unique', index=1, number=2, type=4, 
cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='top_values', full_name='featureStatistics.StringStatistics.top_values', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='avg_length', full_name='featureStatistics.StringStatistics.avg_length', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='rank_histogram', full_name='featureStatistics.StringStatistics.rank_histogram', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='weighted_string_stats', full_name='featureStatistics.StringStatistics.weighted_string_stats', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_STRINGSTATISTICS_FREQANDVALUE, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1241, serialized_end=1637, ) _WEIGHTEDNUMERICSTATISTICS = _descriptor.Descriptor( name='WeightedNumericStatistics', full_name='featureStatistics.WeightedNumericStatistics', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='mean', full_name='featureStatistics.WeightedNumericStatistics.mean', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='std_dev', full_name='featureStatistics.WeightedNumericStatistics.std_dev', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='median', full_name='featureStatistics.WeightedNumericStatistics.median', index=2, number=3, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='histograms', full_name='featureStatistics.WeightedNumericStatistics.histograms', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1639, serialized_end=1763, ) _WEIGHTEDSTRINGSTATISTICS = _descriptor.Descriptor( name='WeightedStringStatistics', full_name='featureStatistics.WeightedStringStatistics', filename=None, file=DESCRIPTOR, containing_type=None, 
fields=[ _descriptor.FieldDescriptor( name='top_values', full_name='featureStatistics.WeightedStringStatistics.top_values', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='rank_histogram', full_name='featureStatistics.WeightedStringStatistics.rank_histogram', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1766, serialized_end=1920, ) _BYTESSTATISTICS = _descriptor.Descriptor( name='BytesStatistics', full_name='featureStatistics.BytesStatistics', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='common_stats', full_name='featureStatistics.BytesStatistics.common_stats', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='unique', full_name='featureStatistics.BytesStatistics.unique', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='avg_num_bytes', full_name='featureStatistics.BytesStatistics.avg_num_bytes', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='min_num_bytes', full_name='featureStatistics.BytesStatistics.min_num_bytes', index=3, number=4, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='max_num_bytes', full_name='featureStatistics.BytesStatistics.max_num_bytes', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1923, serialized_end=2084, ) _COMMONSTATISTICS = _descriptor.Descriptor( name='CommonStatistics', full_name='featureStatistics.CommonStatistics', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='num_non_missing', full_name='featureStatistics.CommonStatistics.num_non_missing', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='num_missing', full_name='featureStatistics.CommonStatistics.num_missing', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='min_num_values', full_name='featureStatistics.CommonStatistics.min_num_values', index=2, number=3, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='max_num_values', full_name='featureStatistics.CommonStatistics.max_num_values', index=3, number=4, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='avg_num_values', full_name='featureStatistics.CommonStatistics.avg_num_values', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='tot_num_values', full_name='featureStatistics.CommonStatistics.tot_num_values', index=5, number=8, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='num_values_histogram', full_name='featureStatistics.CommonStatistics.num_values_histogram', index=6, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='weighted_common_stats', full_name='featureStatistics.CommonStatistics.weighted_common_stats', index=7, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='feature_list_length_histogram', full_name='featureStatistics.CommonStatistics.feature_list_length_histogram', index=8, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2087, serialized_end=2452, ) _HISTOGRAM_BUCKET = _descriptor.Descriptor( name='Bucket', full_name='featureStatistics.Histogram.Bucket', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='low_value', full_name='featureStatistics.Histogram.Bucket.low_value', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='high_value', full_name='featureStatistics.Histogram.Bucket.high_value', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='deprecated_count', full_name='featureStatistics.Histogram.Bucket.deprecated_count', index=2, number=3, type=4, cpp_type=4, label=1, has_default_value=False, 
default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))), _descriptor.FieldDescriptor( name='sample_count', full_name='featureStatistics.Histogram.Bucket.sample_count', index=3, number=4, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2634, serialized_end=2733, ) _HISTOGRAM = _descriptor.Descriptor( name='Histogram', full_name='featureStatistics.Histogram', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='num_nan', full_name='featureStatistics.Histogram.num_nan', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='num_undefined', full_name='featureStatistics.Histogram.num_undefined', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='buckets', full_name='featureStatistics.Histogram.buckets', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='type', full_name='featureStatistics.Histogram.type', index=3, number=4, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='name', full_name='featureStatistics.Histogram.name', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_HISTOGRAM_BUCKET, ], enum_types=[ _HISTOGRAM_HISTOGRAMTYPE, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2455, serialized_end=2779, ) _RANKHISTOGRAM_BUCKET = _descriptor.Descriptor( name='Bucket', full_name='featureStatistics.RankHistogram.Bucket', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='low_rank', full_name='featureStatistics.RankHistogram.Bucket.low_rank', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='high_rank', full_name='featureStatistics.RankHistogram.Bucket.high_rank', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='deprecated_count', full_name='featureStatistics.RankHistogram.Bucket.deprecated_count', index=2, number=3, 
type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))), _descriptor.FieldDescriptor( name='label', full_name='featureStatistics.RankHistogram.Bucket.label', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='sample_count', full_name='featureStatistics.RankHistogram.Bucket.sample_count', index=4, number=5, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2871, serialized_end=2983, ) _RANKHISTOGRAM = _descriptor.Descriptor( name='RankHistogram', full_name='featureStatistics.RankHistogram', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='buckets', full_name='featureStatistics.RankHistogram.buckets', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='name', full_name='featureStatistics.RankHistogram.name', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_RANKHISTOGRAM_BUCKET, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2782, serialized_end=2983, ) _DATASETFEATURESTATISTICSLIST.fields_by_name['datasets'].message_type = _DATASETFEATURESTATISTICS _DATASETFEATURESTATISTICS.fields_by_name['features'].message_type = _FEATURENAMESTATISTICS _FEATURENAMESTATISTICS.fields_by_name['type'].enum_type = _FEATURENAMESTATISTICS_TYPE _FEATURENAMESTATISTICS.fields_by_name['num_stats'].message_type = _NUMERICSTATISTICS _FEATURENAMESTATISTICS.fields_by_name['string_stats'].message_type = _STRINGSTATISTICS _FEATURENAMESTATISTICS.fields_by_name['bytes_stats'].message_type = _BYTESSTATISTICS _FEATURENAMESTATISTICS.fields_by_name['custom_stats'].message_type = _CUSTOMSTATISTIC _FEATURENAMESTATISTICS_TYPE.containing_type = _FEATURENAMESTATISTICS _FEATURENAMESTATISTICS.oneofs_by_name['stats'].fields.append( _FEATURENAMESTATISTICS.fields_by_name['num_stats']) _FEATURENAMESTATISTICS.fields_by_name['num_stats'].containing_oneof = _FEATURENAMESTATISTICS.oneofs_by_name['stats'] _FEATURENAMESTATISTICS.oneofs_by_name['stats'].fields.append( _FEATURENAMESTATISTICS.fields_by_name['string_stats']) _FEATURENAMESTATISTICS.fields_by_name['string_stats'].containing_oneof = _FEATURENAMESTATISTICS.oneofs_by_name['stats'] _FEATURENAMESTATISTICS.oneofs_by_name['stats'].fields.append( _FEATURENAMESTATISTICS.fields_by_name['bytes_stats']) _FEATURENAMESTATISTICS.fields_by_name['bytes_stats'].containing_oneof = _FEATURENAMESTATISTICS.oneofs_by_name['stats'] _CUSTOMSTATISTIC.fields_by_name['histogram'].message_type = _HISTOGRAM 
_CUSTOMSTATISTIC.oneofs_by_name['val'].fields.append( _CUSTOMSTATISTIC.fields_by_name['num']) _CUSTOMSTATISTIC.fields_by_name['num'].containing_oneof = _CUSTOMSTATISTIC.oneofs_by_name['val'] _CUSTOMSTATISTIC.oneofs_by_name['val'].fields.append( _CUSTOMSTATISTIC.fields_by_name['str']) _CUSTOMSTATISTIC.fields_by_name['str'].containing_oneof = _CUSTOMSTATISTIC.oneofs_by_name['val'] _CUSTOMSTATISTIC.oneofs_by_name['val'].fields.append( _CUSTOMSTATISTIC.fields_by_name['histogram']) _CUSTOMSTATISTIC.fields_by_name['histogram'].containing_oneof = _CUSTOMSTATISTIC.oneofs_by_name['val'] _NUMERICSTATISTICS.fields_by_name['common_stats'].message_type = _COMMONSTATISTICS _NUMERICSTATISTICS.fields_by_name['histograms'].message_type = _HISTOGRAM _NUMERICSTATISTICS.fields_by_name['weighted_numeric_stats'].message_type = _WEIGHTEDNUMERICSTATISTICS _STRINGSTATISTICS_FREQANDVALUE.containing_type = _STRINGSTATISTICS _STRINGSTATISTICS.fields_by_name['common_stats'].message_type = _COMMONSTATISTICS _STRINGSTATISTICS.fields_by_name['top_values'].message_type = _STRINGSTATISTICS_FREQANDVALUE _STRINGSTATISTICS.fields_by_name['rank_histogram'].message_type = _RANKHISTOGRAM _STRINGSTATISTICS.fields_by_name['weighted_string_stats'].message_type = _WEIGHTEDSTRINGSTATISTICS _WEIGHTEDNUMERICSTATISTICS.fields_by_name['histograms'].message_type = _HISTOGRAM _WEIGHTEDSTRINGSTATISTICS.fields_by_name['top_values'].message_type = _STRINGSTATISTICS_FREQANDVALUE _WEIGHTEDSTRINGSTATISTICS.fields_by_name['rank_histogram'].message_type = _RANKHISTOGRAM _BYTESSTATISTICS.fields_by_name['common_stats'].message_type = _COMMONSTATISTICS _COMMONSTATISTICS.fields_by_name['num_values_histogram'].message_type = _HISTOGRAM _COMMONSTATISTICS.fields_by_name['weighted_common_stats'].message_type = _WEIGHTEDCOMMONSTATISTICS _COMMONSTATISTICS.fields_by_name['feature_list_length_histogram'].message_type = _HISTOGRAM _HISTOGRAM_BUCKET.containing_type = _HISTOGRAM _HISTOGRAM.fields_by_name['buckets'].message_type = _HISTOGRAM_BUCKET _HISTOGRAM.fields_by_name['type'].enum_type = _HISTOGRAM_HISTOGRAMTYPE _HISTOGRAM_HISTOGRAMTYPE.containing_type = _HISTOGRAM _RANKHISTOGRAM_BUCKET.containing_type = _RANKHISTOGRAM _RANKHISTOGRAM.fields_by_name['buckets'].message_type = _RANKHISTOGRAM_BUCKET DESCRIPTOR.message_types_by_name['DatasetFeatureStatisticsList'] = _DATASETFEATURESTATISTICSLIST DESCRIPTOR.message_types_by_name['DatasetFeatureStatistics'] = _DATASETFEATURESTATISTICS DESCRIPTOR.message_types_by_name['FeatureNameStatistics'] = _FEATURENAMESTATISTICS DESCRIPTOR.message_types_by_name['WeightedCommonStatistics'] = _WEIGHTEDCOMMONSTATISTICS DESCRIPTOR.message_types_by_name['CustomStatistic'] = _CUSTOMSTATISTIC DESCRIPTOR.message_types_by_name['NumericStatistics'] = _NUMERICSTATISTICS DESCRIPTOR.message_types_by_name['StringStatistics'] = _STRINGSTATISTICS DESCRIPTOR.message_types_by_name['WeightedNumericStatistics'] = _WEIGHTEDNUMERICSTATISTICS DESCRIPTOR.message_types_by_name['WeightedStringStatistics'] = _WEIGHTEDSTRINGSTATISTICS DESCRIPTOR.message_types_by_name['BytesStatistics'] = _BYTESSTATISTICS DESCRIPTOR.message_types_by_name['CommonStatistics'] = _COMMONSTATISTICS DESCRIPTOR.message_types_by_name['Histogram'] = _HISTOGRAM DESCRIPTOR.message_types_by_name['RankHistogram'] = _RANKHISTOGRAM _sym_db.RegisterFileDescriptor(DESCRIPTOR) DatasetFeatureStatisticsList = _reflection.GeneratedProtocolMessageType('DatasetFeatureStatisticsList', (_message.Message,), dict( DESCRIPTOR = _DATASETFEATURESTATISTICSLIST, __module__ = 'feature_statistics_pb2' # 
@@protoc_insertion_point(class_scope:featureStatistics.DatasetFeatureStatisticsList) )) _sym_db.RegisterMessage(DatasetFeatureStatisticsList) DatasetFeatureStatistics = _reflection.GeneratedProtocolMessageType('DatasetFeatureStatistics', (_message.Message,), dict( DESCRIPTOR = _DATASETFEATURESTATISTICS, __module__ = 'feature_statistics_pb2' # @@protoc_insertion_point(class_scope:featureStatistics.DatasetFeatureStatistics) )) _sym_db.RegisterMessage(DatasetFeatureStatistics) FeatureNameStatistics = _reflection.GeneratedProtocolMessageType('FeatureNameStatistics', (_message.Message,), dict( DESCRIPTOR = _FEATURENAMESTATISTICS, __module__ = 'feature_statistics_pb2' # @@protoc_insertion_point(class_scope:featureStatistics.FeatureNameStatistics) )) _sym_db.RegisterMessage(FeatureNameStatistics) WeightedCommonStatistics = _reflection.GeneratedProtocolMessageType('WeightedCommonStatistics', (_message.Message,), dict( DESCRIPTOR = _WEIGHTEDCOMMONSTATISTICS, __module__ = 'feature_statistics_pb2' # @@protoc_insertion_point(class_scope:featureStatistics.WeightedCommonStatistics) )) _sym_db.RegisterMessage(WeightedCommonStatistics) CustomStatistic = _reflection.GeneratedProtocolMessageType('CustomStatistic', (_message.Message,), dict( DESCRIPTOR = _CUSTOMSTATISTIC, __module__ = 'feature_statistics_pb2' # @@protoc_insertion_point(class_scope:featureStatistics.CustomStatistic) )) _sym_db.RegisterMessage(CustomStatistic) NumericStatistics = _reflection.GeneratedProtocolMessageType('NumericStatistics', (_message.Message,), dict( DESCRIPTOR = _NUMERICSTATISTICS, __module__ = 'feature_statistics_pb2' # @@protoc_insertion_point(class_scope:featureStatistics.NumericStatistics) )) _sym_db.RegisterMessage(NumericStatistics) StringStatistics = _reflection.GeneratedProtocolMessageType('StringStatistics', (_message.Message,), dict( FreqAndValue = _reflection.GeneratedProtocolMessageType('FreqAndValue', (_message.Message,), dict( DESCRIPTOR = _STRINGSTATISTICS_FREQANDVALUE, __module__ = 'feature_statistics_pb2' # @@protoc_insertion_point(class_scope:featureStatistics.StringStatistics.FreqAndValue) )) , DESCRIPTOR = _STRINGSTATISTICS, __module__ = 'feature_statistics_pb2' # @@protoc_insertion_point(class_scope:featureStatistics.StringStatistics) )) _sym_db.RegisterMessage(StringStatistics) _sym_db.RegisterMessage(StringStatistics.FreqAndValue) WeightedNumericStatistics = _reflection.GeneratedProtocolMessageType('WeightedNumericStatistics', (_message.Message,), dict( DESCRIPTOR = _WEIGHTEDNUMERICSTATISTICS, __module__ = 'feature_statistics_pb2' # @@protoc_insertion_point(class_scope:featureStatistics.WeightedNumericStatistics) )) _sym_db.RegisterMessage(WeightedNumericStatistics) WeightedStringStatistics = _reflection.GeneratedProtocolMessageType('WeightedStringStatistics', (_message.Message,), dict( DESCRIPTOR = _WEIGHTEDSTRINGSTATISTICS, __module__ = 'feature_statistics_pb2' # @@protoc_insertion_point(class_scope:featureStatistics.WeightedStringStatistics) )) _sym_db.RegisterMessage(WeightedStringStatistics) BytesStatistics = _reflection.GeneratedProtocolMessageType('BytesStatistics', (_message.Message,), dict( DESCRIPTOR = _BYTESSTATISTICS, __module__ = 'feature_statistics_pb2' # @@protoc_insertion_point(class_scope:featureStatistics.BytesStatistics) )) _sym_db.RegisterMessage(BytesStatistics) CommonStatistics = _reflection.GeneratedProtocolMessageType('CommonStatistics', (_message.Message,), dict( DESCRIPTOR = _COMMONSTATISTICS, __module__ = 'feature_statistics_pb2' # 
@@protoc_insertion_point(class_scope:featureStatistics.CommonStatistics) )) _sym_db.RegisterMessage(CommonStatistics) Histogram = _reflection.GeneratedProtocolMessageType('Histogram', (_message.Message,), dict( Bucket = _reflection.GeneratedProtocolMessageType('Bucket', (_message.Message,), dict( DESCRIPTOR = _HISTOGRAM_BUCKET, __module__ = 'feature_statistics_pb2' # @@protoc_insertion_point(class_scope:featureStatistics.Histogram.Bucket) )) , DESCRIPTOR = _HISTOGRAM, __module__ = 'feature_statistics_pb2' # @@protoc_insertion_point(class_scope:featureStatistics.Histogram) )) _sym_db.RegisterMessage(Histogram) _sym_db.RegisterMessage(Histogram.Bucket) RankHistogram = _reflection.GeneratedProtocolMessageType('RankHistogram', (_message.Message,), dict( Bucket = _reflection.GeneratedProtocolMessageType('Bucket', (_message.Message,), dict( DESCRIPTOR = _RANKHISTOGRAM_BUCKET, __module__ = 'feature_statistics_pb2' # @@protoc_insertion_point(class_scope:featureStatistics.RankHistogram.Bucket) )) , DESCRIPTOR = _RANKHISTOGRAM, __module__ = 'feature_statistics_pb2' # @@protoc_insertion_point(class_scope:featureStatistics.RankHistogram) )) _sym_db.RegisterMessage(RankHistogram) _sym_db.RegisterMessage(RankHistogram.Bucket) _STRINGSTATISTICS_FREQANDVALUE.fields_by_name['deprecated_freq'].has_options = True _STRINGSTATISTICS_FREQANDVALUE.fields_by_name['deprecated_freq']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')) _HISTOGRAM_BUCKET.fields_by_name['deprecated_count'].has_options = True _HISTOGRAM_BUCKET.fields_by_name['deprecated_count']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')) _RANKHISTOGRAM_BUCKET.fields_by_name['deprecated_count'].has_options = True _RANKHISTOGRAM_BUCKET.fields_by_name['deprecated_count']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')) # @@protoc_insertion_point(module_scope)
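The generated classes behave like ordinary protobuf messages. A minimal, hypothetical usage sketch follows; the module name feature_statistics_pb2 is taken from the __module__ values above, and every field value is invented purely for illustration:

import feature_statistics_pb2 as fs

# Build a statistics list with one dataset and one numeric feature.
stats_list = fs.DatasetFeatureStatisticsList()
dataset = stats_list.datasets.add()           # repeated DatasetFeatureStatistics
dataset.name = 'train'                        # hypothetical dataset name
dataset.num_examples = 1000
feature = dataset.features.add()              # repeated FeatureNameStatistics
feature.name = 'age'                          # hypothetical feature
feature.type = fs.FeatureNameStatistics.INT   # Type enum: INT/FLOAT/STRING/BYTES
feature.num_stats.mean = 38.5                 # writing num_stats selects the 'stats' oneof
feature.num_stats.std_dev = 12.1

# Round-trip through the wire format.
payload = stats_list.SerializeToString()
restored = fs.DatasetFeatureStatisticsList.FromString(payload)
assert restored.datasets[0].features[0].name == 'age'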
run_modelService.py
#from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from http.server import BaseHTTPRequestHandler,HTTPServer
#from SocketServer import ThreadingMixIn
from socketserver import ThreadingMixIn
'''
from augustus.core.ModelLoader import ModelLoader
from augustus.strict import modelLoader
'''
import pandas as pd
import os,sys
from os.path import expanduser
import platform
import numpy as np
import configparser
import threading
import subprocess
import argparse
import re
import cgi
import json
from datetime import datetime

user_records = {}

class LocalModelData(object):
    models = {}

class HTTPRequestHandler(BaseHTTPRequestHandler):

    def do_POST(self):
        print("PYTHON ######## REQUEST ####### STARTED")
        # Match the specific pattern-anomaly routes before the generic '/AION/'
        # route, otherwise the generic pattern swallows them and they never run.
        if None != re.search('/AION/pattern_anomaly_predict', self.path):
            ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
            if ctype == 'application/json':
                length = int(self.headers.get('content-length'))
                model = self.path.split('/')[-1]
                data = self.rfile.read(length)
                data = json.loads(data)
                anomaly = False
                remarks = ''
                clusterid = -1
                home = expanduser("~")
                if platform.system() == 'Windows':
                    configfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'datadetails.json')
                    filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json')
                    clusterfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateClustering.csv')
                    probfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateTransitionProbability.csv')
                else:
                    configfilename = os.path.join(home,'HCLT','AION','target',model,'datadetails.json')
                    filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json')
                    clusterfilename = os.path.join(home,'HCLT','AION','target',model,'stateClustering.csv')
                    probfilename = os.path.join(home,'HCLT','AION','target',model,'stateTransitionProbability.csv')
                dfclus = pd.read_csv(clusterfilename)
                dfprod = pd.read_csv(probfilename)
                with open(configfilename, "r") as f:
                    configSettingsJson = json.loads(f.read())
                activity = configSettingsJson['activity']
                sessionid = configSettingsJson['sessionid']
                with open(filename, "r") as f:
                    configSettingsJson = json.loads(f.read())
                groupswitching = configSettingsJson['groupswitching']
                page_threshold = configSettingsJson['transitionprobability']
                chain_count = configSettingsJson['transitionsequence']
                chain_probability = configSettingsJson['sequencethreshold']
                currentactivity = data[activity]
                if bool(user_records):
                    sessionid = data[sessionid]
                    print(sessionid,user_records['SessionID'])
                    if sessionid != user_records['SessionID']:
                        # new session: reset the per-user tracking state
                        user_records['SessionID'] = sessionid
                        prevactivity = ''
                        user_records['probarry'] = []
                        user_records['prevclusterid'] = -1
                        user_records['NoOfClusterHopping'] = 0
                        user_records['pageclicks'] = 1
                    else:
                        prevactivity = user_records['Activity']
                    user_records['Activity'] = currentactivity
                    pageswitch = True
                    if prevactivity == currentactivity or prevactivity == '':
                        probability = 0
                        pageswitch = False
                        remarks = ''
                    else:
                        user_records['pageclicks'] += 1
                        df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)]
                        if df1.empty:
                            remarks = 'Anomaly Detected - User in unusual state'
                            anomaly = True
                            clusterid = -1
                            probability = 0
                            user_records['probarry'].append(probability)
                            n = int(chain_count)
                            num_list = user_records['probarry'][-n:]
                            davg = sum(num_list)/len(num_list)
                            for index, row in dfclus.iterrows():
                                clusterlist = row["clusterlist"]
                                if currentactivity in clusterlist:
                                    clusterid = row["clusterid"]
                        else:
                            probability = df1['Probability'].iloc[0]
                            user_records['probarry'].append(probability)
                            n = int(chain_count)
                            num_list = user_records['probarry'][-n:]
                            davg = sum(num_list)/len(num_list)
                            for index, row in dfclus.iterrows():
                                clusterlist = row["clusterlist"]
                                if currentactivity in clusterlist:
                                    clusterid = row["clusterid"]
                            remarks = ''
                    if user_records['prevclusterid'] != -1:
                        if probability == 0 and user_records['prevclusterid'] != clusterid:
                            user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1
                            if user_records['pageclicks'] == 1:
                                remarks = 'Anomaly Detected - Frequent Cluster Hopping'
                                anomaly = True
                            else:
                                remarks = 'Cluster Hopping Detected'
                            user_records['pageclicks'] = 0
                        if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False:
                            remarks = 'Anomaly Detected - Multiple Cluster Hopping'
                            anomaly = True
                        elif probability == 0:
                            remarks = 'Anomaly Detected - Unusual State Transition Detected'
                            anomaly = True
                        elif probability <= float(page_threshold):
                            remarks = 'Anomaly Detected - In-frequent State Transition Detected'
                            anomaly = True
                    else:
                        #print(pageswitch)
                        #print(probability)
                        if pageswitch == True:
                            if probability == 0:
                                remarks = 'Anomaly Detected - Unusual State Transition Detected'
                                anomaly = True
                            elif probability <= float(page_threshold):
                                remarks = 'Anomaly Detected - In-frequent State Transition Detected'
                                anomaly = True
                            else:
                                remarks = ''
                    # davg (the rolling transition-probability average) is only
                    # computed when an actual page switch happened
                    if pageswitch and davg < float(chain_probability):
                        if anomaly == False:
                            remarks = 'Anomaly Detected - In-frequent Pattern Detected'
                            anomaly = True
                else:
                    # first request: initialise the per-user tracking state
                    user_records['SessionID'] = data[sessionid]
                    user_records['Activity'] = data[activity]
                    user_records['probability'] = 0
                    user_records['probarry'] = []
                    user_records['chainprobability'] = 0
                    user_records['prevclusterid'] = -1
                    user_records['NoOfClusterHopping'] = 0
                    user_records['pageclicks'] = 1
                    for index, row in dfclus.iterrows():
                        clusterlist = row["clusterlist"]
                        if currentactivity in clusterlist:
                            clusterid = row["clusterid"]
                user_records['prevclusterid'] = clusterid
                self.send_response(200)
                self.send_header('Content-Type', 'application/json')
                self.end_headers()
                resp = '{"status":"SUCCESS","data":{"Anomaly":"'+str(anomaly)+'","Remarks":"'+str(remarks)+'"}}'
                resp = resp+"\n"
                resp = resp.encode()
                self.wfile.write(resp)
            else:
                self.send_response(200)
                self.send_header('Content-Type', 'application/json')
                self.end_headers()
                resp = '{"Anomaly":"Error","Remarks":"Expected application/json content"}'
                resp = resp+"\n"
                resp = resp.encode()
                self.wfile.write(resp)
        elif None != re.search('/AION/pattern_anomaly_settings', self.path):
            ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
            if ctype == 'application/json':
                length = int(self.headers.get('content-length'))
                data = self.rfile.read(length)
                #print(data)
                #keyList = list(data.keys())
                #print(keyList[0])
                model = self.path.split('/')[-1]
                #print(model)
                data = json.loads(data)
                #dataStr = json.dumps(data)
                groupswitching = data['groupswitching']
                transitionprobability = data['transitionprobability']
                transitionsequence = data['transitionsequence']
                sequencethreshold = data['sequencethreshold']
                home = expanduser("~")
                if platform.system() == 'Windows':
                    filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json')
                else:
                    filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json')
                #print(filename)
                data = {}
                data['groupswitching'] = groupswitching
                data['transitionprobability'] = transitionprobability
                data['transitionsequence'] = transitionsequence
                data['sequencethreshold'] = sequencethreshold
                updatedConfig = json.dumps(data)
                with open(filename, "w") as fpWrite:
                    fpWrite.write(updatedConfig)
                self.send_response(200)
                self.send_header('Content-Type', 'application/json')
                self.end_headers()
                resp = '{"Status":"SUCCESS"}'
                resp = resp+"\n"
                resp = resp.encode()
                self.wfile.write(resp)
            else:
                print("python ==> else2")
                data = {}
        elif None != re.search('/AION/', self.path):
            ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
            if ctype == 'application/json':
                length = int(self.headers.get('content-length'))
                data = self.rfile.read(length)
                model = self.path.split('/')[-2]
                operation = self.path.split('/')[-1]
                data = json.loads(data)
                dataStr = json.dumps(data)
                resp = ''  # default response when the operation is not recognised
                if operation.lower() == 'predict':
                    predict_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'AION','aion_predict.py')
                    outputStr = subprocess.check_output([sys.executable,predict_path,dataStr])
                    outputStr = outputStr.decode('utf-8')
                    outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1)
                    resp = outputStr.strip()
                elif operation.lower() == 'explain':
                    predict_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'AION','aion_xai.py')
                    outputStr = subprocess.check_output([sys.executable,predict_path,'local',dataStr])
                    outputStr = outputStr.decode('utf-8')
                    outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1)
                    resp = outputStr.strip()
                resp = resp+"\n"
                resp = resp.encode()
                self.send_response(200)
                self.send_header('Content-Type', 'application/json')
                self.end_headers()
                self.wfile.write(resp)
            else:
                print("python ==> else2")
                data = {}
        else:
            print("python ==> else1")
            self.send_response(403)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
        print("PYTHON ######## REQUEST ####### ENDED")
        return

    def do_GET(self):
        print("PYTHON ######## REQUEST ####### STARTED")
        if None != re.search('/AION/predict', self.path):
            self.send_response(200)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
            model = self.path.split('/')[-1]
            display_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'target',model,'display.json')
            displaymsg = 'Data in JSON Format'
            if os.path.isfile(display_path):
                with open(display_path) as file:
                    config = json.load(file)
                features = config['modelFeatures']
                datajson = {}
                for feature in features:
                    datajson[feature] = 'Value'
                displaymsg = json.dumps(datajson)
            msg = """
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
""".format(url=self.path, displaymsg=displaymsg)
            self.wfile.write(msg.encode())
        else:
            self.send_response(403)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
        return

class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    allow_reuse_address = True

    def shutdown(self):
        self.socket.close()
        HTTPServer.shutdown(self)

class SimpleHttpServer():
    def __init__(self, ip, port):
        self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler)

    def start(self):
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()

    def waitForThread(self):
        self.server_thread.join()

    def stop(self):
        self.server.shutdown()
        self.waitForThread()

if __name__=='__main__':
    parser = argparse.ArgumentParser(description='HTTP Server')
    parser.add_argument('port', type=int, help='Listening port for HTTP Server')
    parser.add_argument('ip', help='HTTP Server IP')
    args = parser.parse_args()
    server = SimpleHttpServer(args.ip, args.port)
    #delete file
    #create file
    #write just msg as "started"
    print('HTTP Server Running...........')
    #file close
    server.start()
    server.waitForThread()
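A hypothetical client-side sketch of exercising the service above. The usecase name, host, port, and input record are placeholders, and the server is assumed to have been started with `python run_modelService.py 8091 127.0.0.1` (port first, then IP, per the argparse definition):

import requests  # assumes the requests package is installed

records = [{"feature1": 10.5, "feature2": "A"}]  # made-up model features
resp = requests.post('http://127.0.0.1:8091/AION/usecase1/predict', json=records)
print(resp.text)  # predictions parsed out of aion_predict.py's stdout

# the same route with .../explain returns the aion_xai.py explanation instead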
runService.py
#from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer from http.server import BaseHTTPRequestHandler,HTTPServer #from SocketServer import ThreadingMixIn from socketserver import ThreadingMixIn ''' from augustus.core.ModelLoader import ModelLoader from augustus.strict import modelLoader ''' import pandas as pd from datetime import datetime import os,sys from os.path import expanduser import platform import numpy as np import configparser import threading import subprocess import argparse import re import cgi import time from datetime import datetime import json import sys from datetime import datetime import sqlite3 from os.path import expanduser from pathlib import Path from io import BytesIO DEPLOY_DATABASE_PATH = os.path.join(os.path.join(os.path.dirname(__file__)),'database') targetPath = Path(DEPLOY_DATABASE_PATH) targetPath.mkdir(parents=True, exist_ok=True) modelVersion = 'run_1' version = 1 class sqlite_db(): def __init__(self, location, database_file=None): if not isinstance(location, Path): location = Path(location) if database_file: self.database_name = database_file else: self.database_name = location.stem + '.db' db_file = str(location/self.database_name) self.conn = sqlite3.connect(db_file) self.cursor = self.conn.cursor() self.tables = [] def table_exists(self, name): if name in self.tables: return True elif name: query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';" listOfTables = self.cursor.execute(query).fetchall() if len(listOfTables) > 0 : self.tables.append(name) return True return False def read(self, table_name,condition=''): if condition == '': return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn) else: return pd.read_sql_query(f"SELECT * FROM {table_name} WHERE {condition}", self.conn) def create_table(self,name, columns, dtypes): query = f'CREATE TABLE IF NOT EXISTS {name} (' for column, data_type in zip(columns, dtypes): query += f"'{column}' TEXT," query = query[:-1] query += ');' self.conn.execute(query) return True def update(self,table_name,updates,condition): update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}' self.cursor.execute(update_query) self.conn.commit() return True def write(self,data, table_name): if not self.table_exists(table_name): self.create_table(table_name, data.columns, data.dtypes) tuple_data = list(data.itertuples(index=False, name=None)) insert_query = f'INSERT INTO {table_name} VALUES(' for i in range(len(data.columns)): insert_query += '?,' insert_query = insert_query[:-1] + ')' self.cursor.executemany(insert_query, tuple_data) self.conn.commit() return True def delete(self, name): pass def close(self): self.conn.close() user_records = {} class LocalModelData(object): models = {} class HTTPRequestHandler(BaseHTTPRequestHandler): def do_POST(self): print("PYTHON ######## REQUEST ####### STARTED") if None != re.search('/AION/', self.path): ctype, pdict = cgi.parse_header(self.headers.get('content-type')) if ctype == 'application/json': length = int(self.headers.get('content-length')) #data = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1) data = self.rfile.read(length) model = self.path.split('/')[-2] operation = self.path.split('/')[-1] #data = json.loads(data) #dataStr = json.dumps(data) home = expanduser("~") dataStr = data sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db') model_path = os.path.join(os.path.dirname(__file__),modelVersion) DATA_FILE_PATH = os.path.join(os.path.dirname(__file__),'temp') Path(DATA_FILE_PATH).mkdir(parents=True, 
exist_ok=True) isdir = os.path.isdir(model_path) if isdir: if operation.lower() == 'predict': if not sqlite_dbObj.table_exists('servingDetails'): data = {'usecase':model,'noOfPredictCalls':0,'noOfDriftCalls':0} data = pd.DataFrame(data, index=[0]) sqlite_dbObj.create_table('servingDetails',data.columns, data.dtypes) df2 = pd.read_json(BytesIO(dataStr), orient ='records') if not sqlite_dbObj.table_exists('prodData'): sqlite_dbObj.create_table('prodData',df2.columns, df2.dtypes) sqlite_dbObj.write(df2,'prodData') data = sqlite_dbObj.read('servingDetails',"usecase = '"+model+"'") if len(data) == 0: data = {'usecase':model,'noOfPredictCalls':1,'noOfDriftCalls':0} data = pd.DataFrame(data, index=[0]) sqlite_dbObj.write(data,'servingDetails') else: noofPredictCalls = int(data['noOfPredictCalls'].iloc[0])+1 sqlite_dbObj.update('servingDetails',"noOfPredictCalls = '"+str(noofPredictCalls)+"'","usecase = '"+model+"'") predict_path = os.path.join(model_path,'aion_predict.py') outputStr = subprocess.check_output([sys.executable,predict_path,dataStr]) outputStr = outputStr.decode('utf-8') outputStr = re.search(r'predictions:(.*)',str(outputStr), re.IGNORECASE).group(1) outputStr = outputStr.strip() resp = outputStr elif operation.lower() == 'monitoring': if not sqlite_dbObj.table_exists('monitoring'): data = {'usecase':model,'status':'No Drift','Msg':'No Input Drift Found','RecordTime':'Time','version':'1'} data = pd.DataFrame(data, index=[0]) sqlite_dbObj.create_table('monitoring',data.columns, data.dtypes) trainingDataPath = os.path.join(os.path.dirname(__file__),modelVersion,'data','preprocesseddata.csv') data = sqlite_dbObj.read('prodData') filetimestamp = str(int(time.time())) dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv') data.to_csv(dataFile, index=False) predict_path = os.path.join(model_path,'aion_ipdrift.py') inputJSON={'trainingDataLocation':trainingDataPath,'currentDataLocation':dataFile} outputStr = subprocess.check_output([sys.executable,predict_path,json.dumps(inputJSON)]) outputStr = outputStr.decode('utf-8') outputStr = re.search(r'drift:(.*)',str(outputStr), re.IGNORECASE).group(1) outputStr = outputStr.strip() outputData = json.loads(outputStr) status = outputData['status'] if status == 'SUCCESS': Msg = str(outputData['data']) else: Msg = 'Error during drift analysis' now = datetime.now() # current date and time date_time = now.strftime("%m/%d/%Y, %H:%M:%S") data = {'usecase':model,'status':status,'Msg':Msg,'RecordTime':date_time,'version':version} data = pd.DataFrame(data, index=[0]) sqlite_dbObj.write(data,'monitoring') resp = outputStr resp=resp+"\n" resp=resp.encode() self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() self.wfile.write(resp) else: print("python ==> else2") data = {} elif None != re.search('/AION/pattern_anomaly_predict', self.path): ctype, pdict = cgi.parse_header(self.headers.get('content-type')) if ctype == 'application/json': length = int(self.headers.get('content-length')) model = self.path.split('/')[-1] data = self.rfile.read(length) data = json.loads(data) anomaly = False remarks = '' clusterid = -1 home = expanduser("~") if platform.system() == 'Windows': configfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'datadetails.json') filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json') clusterfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateClustering.csv') probfilename = 
os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateTransitionProbability.csv') else: configfilename = os.path.join(home,'HCLT','AION','target',model,'datadetails.json') filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json') clusterfilename = os.path.join(home,'HCLT','AION','target',model,'stateClustering.csv') probfilename = os.path.join(home,'HCLT','AION','target',model,'stateTransitionProbability.csv') dfclus = pd.read_csv(clusterfilename) dfprod = pd.read_csv(probfilename) f = open(configfilename, "r") configSettings = f.read() f.close() configSettingsJson = json.loads(configSettings) activity = configSettingsJson['activity'] sessionid = configSettingsJson['sessionid'] f = open(filename, "r") configSettings = f.read() f.close() configSettingsJson = json.loads(configSettings) groupswitching = configSettingsJson['groupswitching'] page_threshold = configSettingsJson['transitionprobability'] chain_count = configSettingsJson['transitionsequence'] chain_probability = configSettingsJson['sequencethreshold'] currentactivity = data[activity] if bool(user_records): sessionid = data[sessionid] print(sessionid,user_records['SessionID']) if sessionid != user_records['SessionID']: user_records['SessionID'] = sessionid prevactivity = '' user_records['probarry'] = [] user_records['prevclusterid'] = -1 user_records['NoOfClusterHopping'] = 0 user_records['pageclicks'] = 1 else: prevactivity = user_records['Activity'] user_records['Activity'] = currentactivity pageswitch = True if prevactivity == currentactivity or prevactivity == '': probability = 0 pageswitch = False remarks = '' else: user_records['pageclicks'] += 1 df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)] if df1.empty: remarks = 'Anomaly Detected - User in unusual state' anomaly = True clusterid = -1 probability = 0 user_records['probarry'].append(probability) n=int(chain_count) num_list = user_records['probarry'][-n:] davg = sum(num_list)/len(num_list) for index, row in dfclus.iterrows(): clusterlist = row["clusterlist"] if currentactivity in clusterlist: clusterid = row["clusterid"] else: probability = df1['Probability'].iloc[0] user_records['probarry'].append(probability) n=int(chain_count) num_list = user_records['probarry'][-n:] davg = sum(num_list)/len(num_list) for index, row in dfclus.iterrows(): clusterlist = row["clusterlist"] if currentactivity in clusterlist: clusterid = row["clusterid"] remarks = '' if user_records['prevclusterid'] != -1: if probability == 0 and user_records['prevclusterid'] != clusterid: user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1 if user_records['pageclicks'] == 1: remarks = 'Anomaly Detected - Frequent Cluster Hopping' anomaly = True else: remarks = 'Cluster Hopping Detected' user_records['pageclicks'] = 0 if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False: remarks = 'Anomaly Detected - Multiple Cluster Hopping' anomaly = True elif probability == 0: remarks = 'Anomaly Detected - Unusual State Transition Detected' anomaly = True elif probability <= float(page_threshold): remarks = 'Anomaly Detected - In-frequent State Transition Detected' anomaly = True else: #print(pageswitch) #print(probability) if pageswitch == True: if probability == 0: remarks = 'Anomaly Detected - Unusual State Transition Detected' anomaly = True elif probability <= float(page_threshold): remarks = 'Anomaly Detected - In-frequent State Transition Detected' anomaly = True else: remarks = '' if davg < float(chain_probability): if anomaly == False: remarks = 'Anomaly Detected - In-frequent Pattern Detected' anomaly = True else: user_records['SessionID'] = data[sessionid] user_records['Activity'] = data[activity] user_records['probability'] = 0 user_records['probarry'] = [] user_records['chainprobability'] = 0 user_records['prevclusterid'] = -1 user_records['NoOfClusterHopping'] = 0 user_records['pageclicks'] = 1 for index, row in dfclus.iterrows(): clusterlist = row["clusterlist"] if currentactivity in clusterlist: clusterid = row["clusterid"] user_records['prevclusterid'] = clusterid self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() resp = '{"status":"SUCCESS","data":{"Anomaly":"'+str(anomaly)+'","Remarks":"'+str(remarks)+'"}}' resp=resp+"\n" resp=resp.encode() self.wfile.write(resp) else: self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() resp = '{"Anomaly":"Error","Remarks":"Invalid Content-Type"}' resp=resp+"\n" resp=resp.encode() self.wfile.write(resp) elif None != re.search('/AION/pattern_anomaly_settings', self.path): ctype, pdict = cgi.parse_header(self.headers.get('content-type')) if ctype == 'application/json': length = int(self.headers.get('content-length')) data = self.rfile.read(length) #print(data) model = self.path.split('/')[-1] #print(model) data = json.loads(data) groupswitching = data['groupswitching'] transitionprobability = data['transitionprobability'] transitionsequence = data['transitionsequence'] sequencethreshold = data['sequencethreshold'] home = expanduser("~") if platform.system() == 'Windows': filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json') else: filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json') #print(filename) data = {} data['groupswitching'] = groupswitching data['transitionprobability'] = transitionprobability data['transitionsequence'] = transitionsequence data['sequencethreshold'] = sequencethreshold updatedConfig = json.dumps(data) with open(filename, "w") as fpWrite: fpWrite.write(updatedConfig) self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() resp = '{"Status":"SUCCESS"}' resp=resp+"\n" resp=resp.encode() self.wfile.write(resp) else: print("python ==> else2") data = {} else: print("python ==> else1") self.send_response(403) self.send_header('Content-Type', 'application/json') self.end_headers() print("PYTHON ######## REQUEST ####### ENDED") return def do_GET(self): print("PYTHON ######## REQUEST ####### STARTED") if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path): usecase = self.path.split('/')[-2] operation = self.path.split('/')[-1] if operation.lower() == 'metrices': sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db') if sqlite_dbObj.table_exists('servingDetails'): df1 = sqlite_dbObj.read('servingDetails',"usecase = '"+usecase+"'") else: df1 = pd.DataFrame() if sqlite_dbObj.table_exists('monitoring'): df2 = sqlite_dbObj.read('monitoring') else: df2 = pd.DataFrame() if sqlite_dbObj.table_exists('modeldetails'): df3 = sqlite_dbObj.read('modeldetails') else: df3 = pd.DataFrame() msg='<html>\n' msg+='<head>\n' msg+='<title>Model Metrics</title>\n' msg+='</head>\n' msg+="""<style>table, th, td { border: 1px solid black; border-collapse: collapse;}</style>""" msg+='<body>\n' msg+='<h2>Model Metrics - Deployed Version 
'+str(version)+'</h2>' msg+='<br/>\n' msg+='<table style="width:80%">\n' msg+="""<tr> <th>Model</th> <th>Version</th> <th>ScoreType</th> <th>Score</th> </tr> """ for idx in reversed(df3.index): msg += "<tr>\n" msg += "<td>"+str(df3.usecase[idx])+"</td>\n" msg += "<td>"+str(df3.version[idx])+"</td>\n" msg += "<td>"+str(df3.scoreType[idx])+"</td>\n" msg += "<td>"+str(df3.score[idx])+"</td>\n" msg += "</tr>\n" msg += '</table>\n' msg += '<br/>\n' msg += '<br/>\n' msg+='<table style="width:50%">\n' msg+='<tr>\n' msg+='<td>No of Predictions</td>\n' if df1.shape[0] > 0: msg+='<td>'+str(df1['noOfPredictCalls'].iloc[0])+'</td>\n' else: msg+='<td>0</td>\n' msg+='</tr>\n' msg+='<tr>\n' msg+='<td>No of Ground Truth</td>\n' msg+='<td>0</td>\n' msg+='</tr>\n' msg += '</table>\n' msg += '<br/>\n' msg+='<table style="width:100%">\n' msg+="""<tr> <th>UseCase</th> <th>Version</th> <th>Status</th> <th>Message</th> <th>Time</th> </tr> """ for idx in reversed(df2.index): msg += "<tr>\n" msg += "<td>"+str(df2.usecase[idx])+"</td>\n" msg += "<td>"+str(df2.version[idx])+"</td>\n" msg += "<td>"+str(df2.status[idx])+"</td>\n" msg += "<td>"+str(df2.Msg[idx])+"</td>\n" msg += "<td>"+str(df2.RecordTime[idx])+"</td>\n" msg += "</tr>\n" msg += '</table>\n' msg += '</body>\n' msg += '</html>\n' self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write(msg.encode()) else: self.send_response(403) self.send_header('Content-Type', 'application/json') self.end_headers() return class ThreadedHTTPServer(ThreadingMixIn, HTTPServer): allow_reuse_address = True def shutdown(self): self.socket.close() HTTPServer.shutdown(self) class file_status(): def __init__(self,file): self.files_status = {} self.initializeFileStatus(file) def initializeFileStatus(self, file): self.files_status = {'path': file, 'time':file.stat().st_mtime} def is_file_changed(self): if self.files_status['path'].stat().st_mtime > self.files_status['time']: self.files_status['time'] = self.files_status['path'].stat().st_mtime return True return False def run(self): while True: time.sleep(30) if self.is_file_changed(): readRun() class SimpleHttpServer(): def __init__(self, ip, port,model_file_path): self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler) self.status_checker = file_status(model_file_path) def start(self): self.server_thread = threading.Thread(target=self.server.serve_forever) self.server_thread.daemon = True self.server_thread.start() self.status_thread = threading.Thread(target=self.status_checker.run) self.status_thread.start() def waitForThread(self): self.server_thread.join() def stop(self): self.server.shutdown() self.waitForThread() def readRun(boot=False): filename = os.path.join(os.path.join(os.path.dirname(__file__)),'run') f = open(filename, "r") data = json.loads(f.read()) f.close() global modelVersion global version modelVersion = 'run_'+str(data['version']) version = str(data['version']) sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db') dataa = {'usecase':data['usecase'],'version':data['version'],'scoreType':data['scoreType'],'score':data['score']} data = pd.DataFrame(dataa, index=[0]) if not sqlite_dbObj.table_exists('modeldetails'): sqlite_dbObj.create_table('modeldetails',data.columns, data.dtypes) rdata = sqlite_dbObj.read('modeldetails',"version = '"+str(dataa['version'])+"'") if (rdata.shape[0]) == 0 or (not boot): sqlite_dbObj.write(data,'modeldetails') readRun(boot=True) if __name__=='__main__': filename = 
os.path.join(os.path.join(os.path.dirname(__file__)),'run') parser = argparse.ArgumentParser(description='HTTP Server') parser.add_argument('port', type=int, help='Listening port for HTTP Server') parser.add_argument('ip', help='HTTP Server IP') args = parser.parse_args() server = SimpleHttpServer(args.ip, args.port,Path(filename)) #delete file #create file #write just msg as "started" print('HTTP Server Running...........') #file close server.start() server.waitForThread()
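The serving process above is exercised over plain HTTP. The sketch below is a minimal smoke test, assuming the server was started through its __main__ block on a placeholder address and that a usecase named 'mymodel' is deployed; the feature names are placeholders too.

import json
import requests  # third-party HTTP client, assumed available

base = 'http://127.0.0.1:8091/AION/mymodel'  # placeholder ip/port/usecase
records = json.dumps([{'feature1': 1.0, 'feature2': 'A'}])  # placeholder features

# /predict forwards the JSON records to run_<n>/aion_predict.py and also
# appends the rows to the 'prodData' table used later for drift checks.
print(requests.post(base + '/predict', data=records,
                    headers={'Content-Type': 'application/json'}).text)

# /monitoring runs aion_ipdrift.py on the accumulated production data and
# records the outcome in the 'monitoring' table rendered by the metrices page.
print(requests.post(base + '/monitoring', data=records,
                    headers={'Content-Type': 'application/json'}).text)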
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
model_service.py
''' from AION_185 import aion_prediction from AION_185 import featureslist from AION_185 import aion_drift from AION_185 import aion_performance ''' #from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer from http.server import BaseHTTPRequestHandler,HTTPServer #from SocketServer import ThreadingMixIn from socketserver import ThreadingMixIn ''' from augustus.core.ModelLoader import ModelLoader from augustus.strict import modelLoader ''' import pandas as pd import os,sys from os.path import expanduser import platform import numpy as np import configparser import threading import subprocess import argparse import re import cgi from datetime import datetime import json import sys from datetime import datetime user_records = {} class LocalModelData(object): models = {} class HTTPRequestHandler(BaseHTTPRequestHandler): def do_POST(self): print("PYTHON ######## REQUEST ####### STARTED") if None != re.search('/AION/predict', self.path): ctype, pdict = cgi.parse_header(self.headers.get('content-type')) if ctype == 'application/json': length = int(self.headers.get('content-length')) #data = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1) data = self.rfile.read(length) data = json.loads(data) dataStr = json.dumps(data) outputStr = aion_prediction.predict(dataStr) outputStr = outputStr.strip() resp = outputStr resp=resp+"\n" resp=resp.encode() self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() self.wfile.write(resp) else: print("python ==> else2") data = {} elif None != re.search('/AION/features', self.path): ctype, pdict = cgi.parse_header(self.headers.get('content-type')) if ctype == 'application/json': length = int(self.headers.get('content-length')) outputStr = featureslist.getfeatures() outputStr = outputStr.strip() resp = outputStr resp=resp+"\n" resp=resp.encode() self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() self.wfile.write(resp) else: print("python ==> else2") data = {} elif None != re.search('/AION/monitoring', self.path): ctype, pdict = cgi.parse_header(self.headers.get('content-type')) if ctype == 'application/json': length = int(self.headers.get('content-length')) data = self.rfile.read(length) model = self.path.split('/')[-1] data = json.loads(data) dataStr = json.dumps(data) outputStr = aion_drift.drift(dataStr) outputStr = outputStr.strip() resp = outputStr resp=resp+"\n" resp=resp.encode() self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() self.wfile.write(resp) else: print("python ==> else2") data = {} elif None != re.search('/AION/performance', self.path): ctype, pdict = cgi.parse_header(self.headers.get('content-type')) if ctype == 'application/json': length = int(self.headers.get('content-length')) data = self.rfile.read(length) data = json.loads(data) dataStr = json.dumps(data) outputStr = aion_performance.drift(dataStr) outputStr = outputStr.strip() resp = outputStr resp=resp+"\n" resp=resp.encode() self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() self.wfile.write(resp) else: print("python ==> else2") data = {} elif None != re.search('/AION/pattern_anomaly_predict', self.path): ctype, pdict = cgi.parse_header(self.headers.get('content-type')) if ctype == 'application/json': length = int(self.headers.get('content-length')) model = self.path.split('/')[-1] data = self.rfile.read(length) data = json.loads(data) anomaly = False remarks = '' clusterid = -1 home = expanduser("~") 
if platform.system() == 'Windows': configfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'datadetails.json') filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json') clusterfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateClustering.csv') probfilename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'stateTransitionProbability.csv') else: configfilename = os.path.join(home,'HCLT','AION','target',model,'datadetails.json') filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json') clusterfilename = os.path.join(home,'HCLT','AION','target',model,'stateClustering.csv') probfilename = os.path.join(home,'HCLT','AION','target',model,'stateTransitionProbability.csv') dfclus = pd.read_csv(clusterfilename) dfprod = pd.read_csv(probfilename) f = open(configfilename, "r") configSettings = f.read() f.close() configSettingsJson = json.loads(configSettings) activity = configSettingsJson['activity'] sessionid = configSettingsJson['sessionid'] f = open(filename, "r") configSettings = f.read() f.close() configSettingsJson = json.loads(configSettings) groupswitching = configSettingsJson['groupswitching'] page_threshold = configSettingsJson['transitionprobability'] chain_count = configSettingsJson['transitionsequence'] chain_probability = configSettingsJson['sequencethreshold'] currentactivity = data[activity] if bool(user_records): sessionid = data[sessionid] print(sessionid,user_records['SessionID']) if sessionid != user_records['SessionID']: user_records['SessionID'] = sessionid prevactivity = '' user_records['probarry'] = [] user_records['prevclusterid'] = -1 user_records['NoOfClusterHopping'] = 0 user_records['pageclicks'] = 1 else: prevactivity = user_records['Activity'] user_records['Activity'] = currentactivity pageswitch = True if prevactivity == currentactivity or prevactivity == '': probability = 0 pageswitch = False remarks = '' else: user_records['pageclicks'] += 1 df1 = dfprod[(dfprod['State'] == prevactivity) & (dfprod['NextState'] == currentactivity)] if df1.empty: remarks = 'Anomaly Detected - User in unusual state' anomaly = True clusterid = -1 probability = 0 user_records['probarry'].append(probability) n=int(chain_count) num_list = user_records['probarry'][-n:] davg = sum(num_list)/len(num_list) for index, row in dfclus.iterrows(): clusterlist = row["clusterlist"] if currentactivity in clusterlist: clusterid = row["clusterid"] else: probability = df1['Probability'].iloc[0] user_records['probarry'].append(probability) n=int(chain_count) num_list = user_records['probarry'][-n:] davg = sum(num_list)/len(num_list) for index, row in dfclus.iterrows(): clusterlist = row["clusterlist"] if currentactivity in clusterlist: clusterid = row["clusterid"] remarks = '' if user_records['prevclusterid'] != -1: if probability == 0 and user_records['prevclusterid'] != clusterid: user_records['NoOfClusterHopping'] = user_records['NoOfClusterHopping']+1 if user_records['pageclicks'] == 1: remarks = 'Anomaly Detected - Frequent Cluster Hopping' anomaly = True else: remarks = 'Cluster Hopping Detected' user_records['pageclicks'] = 0 if user_records['NoOfClusterHopping'] > int(groupswitching) and anomaly == False: remarks = 'Anomaly Detected - Multiple Cluster Hopping' anomaly = True elif probability == 0: remarks = 'Anomaly Detected - Unusual State Transition Detected' anomaly = True elif probability <= float(page_threshold): remarks = 'Anomaly Detected - In-frequent State Transition Detected' anomaly = True else: #print(pageswitch) #print(probability) if pageswitch == True: if probability == 0: remarks = 'Anomaly Detected - Unusual State Transition Detected' anomaly = True elif probability <= float(page_threshold): remarks = 'Anomaly Detected - In-frequent State Transition Detected' anomaly = True else: remarks = '' if davg < float(chain_probability): if anomaly == False: remarks = 'Anomaly Detected - In-frequent Pattern Detected' anomaly = True else: user_records['SessionID'] = data[sessionid] user_records['Activity'] = data[activity] user_records['probability'] = 0 user_records['probarry'] = [] user_records['chainprobability'] = 0 user_records['prevclusterid'] = -1 user_records['NoOfClusterHopping'] = 0 user_records['pageclicks'] = 1 for index, row in dfclus.iterrows(): clusterlist = row["clusterlist"] if currentactivity in clusterlist: clusterid = row["clusterid"] user_records['prevclusterid'] = clusterid self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() resp = '{"status":"SUCCESS","data":{"Anomaly":"'+str(anomaly)+'","Remarks":"'+str(remarks)+'"}}' resp=resp+"\n" resp=resp.encode() self.wfile.write(resp) else: self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() resp = '{"Anomaly":"Error","Remarks":"Invalid Content-Type"}' resp=resp+"\n" resp=resp.encode() self.wfile.write(resp) elif None != re.search('/AION/pattern_anomaly_settings', self.path): ctype, pdict = cgi.parse_header(self.headers.get('content-type')) if ctype == 'application/json': length = int(self.headers.get('content-length')) data = self.rfile.read(length) #print(data) model = self.path.split('/')[-1] #print(model) data = json.loads(data) groupswitching = data['groupswitching'] transitionprobability = data['transitionprobability'] transitionsequence = data['transitionsequence'] sequencethreshold = data['sequencethreshold'] home = expanduser("~") if platform.system() == 'Windows': filename = os.path.join(home,'AppData','Local','HCLT','AION','target',model,'clickstream.json') else: filename = os.path.join(home,'HCLT','AION','target',model,'clickstream.json') #print(filename) data = {} data['groupswitching'] = groupswitching data['transitionprobability'] = transitionprobability data['transitionsequence'] = transitionsequence data['sequencethreshold'] = sequencethreshold updatedConfig = json.dumps(data) with open(filename, "w") as fpWrite: fpWrite.write(updatedConfig) self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() resp = '{"Status":"SUCCESS"}' resp=resp+"\n" resp=resp.encode() self.wfile.write(resp) else: print("python ==> else2") data = {} else: print("python ==> else1") self.send_response(403) self.send_header('Content-Type', 'application/json') self.end_headers() print("PYTHON ######## REQUEST ####### ENDED") return def do_GET(self): print("PYTHON ######## REQUEST ####### STARTED") if None != re.search('/AION/predict', self.path): self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() features = featureslist.getfeatures() displaymsg = 'Data in JSON Format' config = json.loads(features) features = config['features'] datajson={} for feature in features: if feature['Type'].lower() != 'target': datajson[feature['feature']] = 'Value' displaymsg = json.dumps(datajson) msg=""" URL:{url} RequestType: POST Content-Type=application/json 
Body: {displaymsg} """.format(url=self.path,displaymsg=displaymsg) self.wfile.write(msg.encode()) elif None != re.search('/AION/monitoring', self.path): self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() displaymsg='{"trainingDataLocation":"Reference Data File Path","currentDataLocation":"Latest Data File Path"}' msg=""" URL:{url} RequestType: POST Content-Type=application/json Body: {displaymsg}""".format(url=self.path,displaymsg=displaymsg) self.wfile.write(msg.encode()) elif None != re.search('/AION/features', self.path): outputStr = featureslist.getfeatures() outputStr = outputStr.strip() resp = outputStr resp=resp+"\n" resp=resp.encode() self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() self.wfile.write(resp) else: msg=""" URL for prediction: /AION/predict URL for features List: /AION/features URL for monitoring: /AION/monitoring URL for performance: /AION/performance""" self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() self.wfile.write(msg.encode()) return class ThreadedHTTPServer(ThreadingMixIn, HTTPServer): allow_reuse_address = True def shutdown(self): self.socket.close() HTTPServer.shutdown(self) class SimpleHttpServer(): def __init__(self, ip, port): self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler) def start(self): self.server_thread = threading.Thread(target=self.server.serve_forever) self.server_thread.daemon = True self.server_thread.start() def waitForThread(self): self.server_thread.join() def stop(self): self.server.shutdown() self.waitForThread() if __name__=='__main__': parser = argparse.ArgumentParser(description='HTTP Server') parser.add_argument('port', type=int, help='Listening port for HTTP Server') parser.add_argument('ip', help='HTTP Server IP') args = parser.parse_args() server = SimpleHttpServer(args.ip, args.port) #delete file #create file #write just msg as "started" print('HTTP Server Running...........') #file close server.start() server.waitForThread()
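The GET handlers above double as lightweight documentation: /AION/predict renders a sample request body built from the model's feature list. A minimal discovery sketch, with the address a placeholder assumption:

import requests  # assumed available

base = 'http://127.0.0.1:8091'  # placeholder address; port and ip come from the CLI args
print(requests.get(base + '/AION/predict').text)     # sample JSON body per feature
print(requests.get(base + '/AION/features').text)    # raw feature list
print(requests.get(base + '/AION/monitoring').text)  # drift payload template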
dl_aion_predict.py
import sys import os import pickle import json import traceback import warnings warnings.filterwarnings("ignore") import numpy as np import pandas as pd import dask.dataframe as dd import scipy from pandas import json_normalize import dask.distributed from dask_ml.wrappers import ParallelPostFit class incBatchPredictor(): def __init__(self): self.home = os.path.dirname(os.path.abspath(__file__)) self.configPath = os.path.join(self.home, 'etc', 'config.json') self.configDict = {} self.incProfilerPath = '' self.incSelectorPath = '' self.modelPath = '' self.incProfiler = None self.incSelector = None self.model = None self.targetFeature = None self.trainingFeatures = None self.modelName = '' self.problemType = '' self.algorithm = '' self.dataLocation = "" self.nworkers = None self.npartitions = None self.threadsPerWorker = None def get_nworkers(self): return self.nworkers def get_npartitions(self): return self.npartitions def get_threadsPerWorker(self): return self.threadsPerWorker def readData(self,data): try: if os.path.splitext(data)[1] in [".tsv",".csv",".data"]: df = dd.read_csv(data, # sep=r'\s*,\s*', assume_missing=True, parse_dates=True, infer_datetime_format=True, sample=1000000, # dtype={'caliper': 'object', # 'timestamp': 'object'}, # dtype='object', na_values=['-','?'] ) df = df.repartition(self.npartitions) else: if os.path.splitext(data)[1] == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) else: jsonData = json.loads(data) for key in jsonData: if type(jsonData[key]) == str: try: x = eval(jsonData[key]) if type(x) == int: jsonData[key] = int(jsonData[key]) print("check inside ==int") if type(x) == float: jsonData[key] = float(jsonData[key]) except: pass df = json_normalize(jsonData) df = df.replace(r'^\s*$', np.NaN, regex=True) df = dd.from_pandas(df, npartitions=self.npartitions) except KeyError as e: output = {"status":"FAIL","message":str(e).strip('"')} print(json.dumps(output)) except Exception as e: output = {"status":"FAIL","message":str(e).strip('"')} print(json.dumps(output)) return df def readConfig(self): with open(self.configPath, 'r', encoding= 'utf8') as f: self.configDict = json.load(f) self.targetFeature = self.configDict['targetFeature'] self.trainingFeatures = self.configDict['trainingFeatures'] self.modelName = self.configDict["modelName"] self.modelVersion = self.configDict["modelVersion"] self.dataLocation = self.configDict["dataLocation"] self.deployLocation = self.configDict["deployLocation"] self.incProfilerPath = self.configDict["profilerLocation"] self.incSelectorPath = self.configDict["selectorLocation"] self.problemType = self.configDict['analysisType'] self.algorithm = self.configDict["algorithm"] self.modelPath = self.configDict["modelLocation"] self.scoringCriteria = self.configDict['scoringCriteria'] self.nworkers = int(self.configDict["n_workers"]) self.npartitions = int(self.configDict["npartitions"]) self.threadsPerWorker = int(self.configDict["threads_per_worker"]) def pickleLoad(self, file): if os.path.exists(file): with open(file, 'rb') as f: model = pickle.load(f) return model else: return None def loadSavedModels(self): self.incProfiler = self.pickleLoad(os.path.join(self.home, 'model',self.incProfilerPath)) if self.incSelectorPath != '': self.incSelector = self.pickleLoad(os.path.join(self.home, 'model',self.incSelectorPath)) self.model = self.pickleLoad(os.path.join(self.home, 'model',self.modelPath)) def dataFramePreProcess(self, df): df = df.replace(r'^\s*$', np.NaN, regex=True) df = df.replace('-', np.nan) df 
= df.replace('?', np.nan) return df def profiler(self, df): X = self.dataFramePreProcess(df) if self.targetFeature in X: X = X.drop(self.targetFeature, axis=1) X = self.incProfiler.transform(X) if self.incSelectorPath != '': X = self.incSelector.transform(X.to_dask_array(lengths=True)) # X = dd.from_dask_array(X) return X def trainedModel(self,X): if self.algorithm=="Distributed Light Gradient Boosting (LightGBM)": X = X.to_dask_array(lengths=True) preds = self.model.predict(X) if hasattr(preds, 'compute'): preds = preds.compute() return preds def apply_output_format(self,df,modeloutput): label_maping = None if self.problemType.lower() == 'regression': if not isinstance(modeloutput, np.ndarray): modeloutput = modeloutput.to_numpy() dask_arr = dd.from_array(modeloutput) dask_arr.name = 'prediction' df = df.merge(dask_arr.to_frame()) df['rounded_prediction'] = df['prediction'].round(2) elif self.problemType.lower() == 'classification': print("type: ", type(modeloutput)) if not isinstance(modeloutput, np.ndarray): modeloutput = modeloutput.to_numpy() dask_arr = dd.from_array(modeloutput) dask_arr.name = "prediction" df = df.merge(dask_arr.to_frame()) with open(self.deployLocation + "/etc/" + "label_mapping.json") as jsonfile: label_maping = json.load(jsonfile) df["prediction"] = df["prediction"].astype(int) df["prediction"] = df["prediction"].astype(str) df["prediction_label"] = df["prediction"].map(label_maping) df["prediction_label"] = df["prediction_label"].fillna(df["prediction"]) # fall back to the raw class index where no label mapping exists outputjson = df.compute().to_json(orient='records') outputjson = {"status":"SUCCESS","data":json.loads(outputjson)} return(json.dumps(outputjson)) def predict(self,data): try: self.readConfig() df = self.readData(data) dfOrg = df.copy() if len(self.configDict)!=0: self.loadSavedModels() df = self.profiler(df) modeloutput = self.trainedModel(df) # dfOrg = dfOrg[self.allFtrs] output = self.apply_output_format(dfOrg, modeloutput) else: output = {"status":"FAIL","message":"Configuration is not available"} except Exception as e: print(traceback.format_exc()) output = {"status":"FAIL","message":str(e).strip('"')} return output if __name__ == "__main__": incBPobj = incBatchPredictor() incBPobj.readConfig() nWorkers = incBPobj.get_nworkers() threads_per_worker = incBPobj.get_threadsPerWorker() cluster = dask.distributed.LocalCluster(n_workers=nWorkers, threads_per_worker=threads_per_worker) client = dask.distributed.Client(cluster) output = incBPobj.predict(sys.argv[1]) print("predictions:",output) client.close() cluster.close()
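readConfig() above expects an etc/config.json next to the script. A sketch of generating such a file follows; only the key names come from the code above, every value is a placeholder assumption.

import json
import os

config = {
    'targetFeature': 'price',                # placeholder
    'trainingFeatures': ['area', 'rooms'],   # placeholder
    'modelName': 'usecase1',
    'modelVersion': '1',
    'dataLocation': '/data/input.csv',
    'deployLocation': '/deploy/usecase1/1',
    'profilerLocation': 'incProfiler.pkl',
    'selectorLocation': '',                  # empty string skips the selector
    'analysisType': 'regression',
    'algorithm': 'Distributed Light Gradient Boosting (LightGBM)',
    'modelLocation': 'model.pkl',
    'scoringCriteria': 'r2',
    'n_workers': 2,
    'npartitions': 4,
    'threads_per_worker': 2,
}
os.makedirs('etc', exist_ok=True)
with open(os.path.join('etc', 'config.json'), 'w') as f:
    json.dump(config, f, indent=2)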
heBinary.py
# -*- coding: utf-8 -*- """ Created on Sat Sep 10 23:57:56 2022 @author: jayaramakrishnans """ import numpy as np import pandas as pd from secrets import token_bytes from ppxgboost import PaillierAPI as paillier from ppxgboost import BoosterParser as boostparser from ppxgboost import PPBooster as ppbooster from ppxgboost.PPBooster import MetaData from ppxgboost.PPKey import PPBoostKey # from ope.pyope.ope import OPE from pyope.ope import OPE import joblib import logging import os from flask import Flask, request, jsonify, render_template # import pickle import json import jsonpickle app = Flask(__name__) class server_ppxgboost: def __init__(self): # self.problemtype=problemtype self.confdata=None print("Inside server_ppxgboost_1\n") ## Loading config file def configload(self): print("Inside server_ppxgboost_1,configload\n") cwd = os.path.abspath(os.path.dirname(__file__)) file_name='config.json' try: config_file=os.path.normpath(os.path.join(cwd,'config',file_name)) except Exception as e: print("config path error. Error Msg: \n",e) with open(config_file, 'r') as file: data = json.load(file) model_name=str(data["model_name"]) # version=str(data["version"]) return model_name ## Load server xgboost model from ../model dir. def model_load( self,path): print("Inside server_ppxgboost_1,model_load\n") cwd = os.path.abspath(os.path.dirname(__file__)) file_name=path try: model_location=os.path.normpath(os.path.join(cwd,'model',file_name)) except Exception as e: print("Model path error. Error Msg: \n",e) # print(path) loaded_model = joblib.load(model_location) return loaded_model ## Generate encrypted prediction def ppxgboostpredict_server(self,model,ppBoostKey,clientdata,min_max): xgboost_model = model p_trees, feature_set, min_max = boostparser.model_to_trees(xgboost_model, min_max) enc_trees = ppbooster.enc_xgboost_model(ppBoostKey, p_trees, MetaData(min_max)) enc_client_data=clientdata ## Evaluate the key-encrypted trees against the OPE-encrypted client data enc_predictions = ppbooster.predict_binary(enc_trees, enc_client_data) return enc_predictions ## XGBoost wrapper for native model (native model to scikit learn xgboost model) def xgboostwrappertonative(self,wrappermodel): print("Inside server_ppxgboost_7,xgboostwrappertonative= \n",wrappermodel) nativemodel = wrappermodel.get_booster() return nativemodel def training_dataset_parser(self,train_data: pd.DataFrame): """ :param train_data: dataframe training data :return: minimum of the training dataset, and maximum of the training dataset. """ return {'min': np.min(pd.DataFrame.min(train_data)), 'max': np.max(pd.DataFrame.max(train_data))} ## Homomorphic secure main server cls_obj=server_ppxgboost() @app.route('/homomorphicprediction_server_api',methods=['GET','POST']) def main_server(): data = request.get_json(force=True) response_data = json.dumps(data) json_in= json.loads(response_data) values = json_in['values'] features=json_in['features'] ppBoostKey=jsonpickle.decode(json_in['key']) encrypted_clientdata=pd.DataFrame(values,columns =features) ## Create encrypted prediction from the model model=None min_max = {'min': 0, 'max': 1000} model_name = cls_obj.configload() # model_name=usecase_name model_location=model_name model_xgboost = cls_obj.model_load(model_location) try: ## For a sklearn based xgboost model, convert to the native model model = cls_obj.xgboostwrappertonative(model_xgboost) except: ## For a native xgboost model, there is no booster to extract model= model_xgboost ## For logging cwd = os.path.abspath(os.path.dirname(__file__)) # model_name=model_name file_name = model_name.rsplit('.', 1) file_name=file_name[0] file_name=file_name+".log" try: hm_log=os.path.normpath(os.path.join(cwd,'logs',file_name)) os.makedirs(os.path.dirname(hm_log), exist_ok=True) except Exception as e: print("Log path error. Error Msg: \n",e) logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG) log = logging.getLogger('AION') log.info('Server binary class encryption service started...') print("Encrypted client data is ....\n") log.info("Encrypted client data is (received by server): \n"+str(encrypted_clientdata)) enc_predictions = cls_obj.ppxgboostpredict_server(model,ppBoostKey,encrypted_clientdata,min_max) log.info("server side encrypted prediction: \n"+str(enc_predictions)) ## Serialize the ppboost encrypted prediction with jsonpickle; the standard pickle lib does not work here. enc_predictions_json = jsonpickle.encode(enc_predictions) # enc_predictions_json = enc_predictions.to_json() return enc_predictions_json if __name__ == '__main__': #app.run(debug=True) app.run(host="localhost", port=9000, debug=True)
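main_server() above expects a JSON body carrying OPE-encrypted cell values, the column names, and a jsonpickle-encoded PPBoostKey. A sketch of assembling that body follows, using the same key-generation calls the client scripts below use; the cell values shown are placeholders. Note that the server hardcodes min_max = {'min': 0, 'max': 1000}, so the client-side encryption must be performed against a compatible range.

import json
from secrets import token_bytes

import jsonpickle
from ppxgboost import PaillierAPI as paillier
from ppxgboost.PPKey import PPBoostKey
from pyope.ope import OPE

# Same key material the clients below construct.
public_key, private_key = paillier.he_key_gen()
ppBoostKey = PPBoostKey(public_key, token_bytes(16), OPE(token_bytes(16)))

payload = {
    'values': [[101, 202]],      # placeholder: must be OPE-encrypted cells
    'features': ['f1', 'f2'],    # placeholder column names
    'key': jsonpickle.encode(ppBoostKey),
}
body = json.dumps(payload)       # POST this to /homomorphicprediction_server_api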
heRegression.py
# -*- coding: utf-8 -*- """ Created on Sat Sep 10 23:57:56 2022 """ import numpy as np import pandas as pd from secrets import token_bytes from ppxgboost import PaillierAPI as paillier from ppxgboost import BoosterParser as boostparser from ppxgboost import PPBooster as ppbooster from ppxgboost.PPBooster import MetaData from ppxgboost.PPKey import PPBoostKey # from ope.pyope.ope import OPE from pyope.ope import OPE import joblib import logging import os from flask import Flask, request, jsonify, render_template # import pickle import json import jsonpickle import os.path import time import subprocess import sys from os.path import expanduser import ntpath import shutil import platform from pathlib import Path home = expanduser("~") if platform.system() == 'Windows': LOG_FILE_PATH = os.path.join(home,'AppData','Local','HCLT','AION','logs') else: LOG_FILE_PATH = os.path.join(home,'HCLT','AION','logs') app = Flask(__name__) class server_ppxgboost: def __init__(self): # self.problemtype=problemtype self.confdata=None ## Loading config file def configload(self): cwd = os.path.abspath(os.path.dirname(__file__)) file_name='config.json' try: config_file=os.path.normpath(os.path.join(cwd,'config',file_name)) except Exception as e: print("config path error. Error Msg: \n",e) with open(config_file, 'r') as file: data = json.load(file) model_name=str(data["model_name"]) # version=str(data["version"]) return model_name ## Load server xgboost model from ../model dir. def model_load( self,path): cwd = os.path.abspath(os.path.dirname(__file__)) file_name=path try: model_location=os.path.normpath(os.path.join(cwd,'model',file_name)) except Exception as e: print("Model path error. Error Msg: \n",e) # print(path) loaded_model = joblib.load(model_location) return loaded_model ## Generate encrypted prediction def ppxgboostpredict_server(self,model,ppBoostKey,clientdata,min_max): xgboost_model = model p_trees, feature_set, min_max = boostparser.model_to_trees(xgboost_model, min_max) enc_trees = ppbooster.enc_xgboost_model(ppBoostKey, p_trees, MetaData(min_max)) enc_client_data=clientdata ## Evaluate the key-encrypted trees against the OPE-encrypted client data enc_predictions = ppbooster.predict_binary(enc_trees, enc_client_data) return enc_predictions ## XGBoost wrapper for native model (native model to scikit learn xgboost model) def xgboostwrappertonative(self,wrappermodel): nativemodel = wrappermodel.get_booster() return nativemodel def training_dataset_parser(self,train_data: pd.DataFrame): """ :param train_data: dataframe training data :return: minimum of the training dataset, and maximum of the training dataset. """ return {'min': np.min(pd.DataFrame.min(train_data)), 'max': np.max(pd.DataFrame.max(train_data))} ## Homomorphic secure main server cls_obj=server_ppxgboost() @app.route('/homomorphicprediction_server_api',methods=['GET','POST']) def main_server(): data = request.get_json(force=True) response_data = json.dumps(data) json_in= json.loads(response_data) values = json_in['values'] features=json_in['features'] ppBoostKey=jsonpickle.decode(json_in['key']) encrypted_clientdata=pd.DataFrame(values,columns =features) ## Create encrypted prediction from the model model=None min_max = {'min': 0, 'max': 1000} model_name = cls_obj.configload() # model_name=usecase_name model_location=model_name model_xgboost = cls_obj.model_load(model_location) try: ## For a sklearn based xgboost model, convert to the native model model = cls_obj.xgboostwrappertonative(model_xgboost) except: ## For a native xgboost model, there is no booster to extract model= model_xgboost ## For logging cwd = os.path.abspath(os.path.dirname(__file__)) # model_name=model_name file_name = model_name.rsplit('.', 1) file_name=file_name[0] file_name=file_name+".log" try: hm_log=os.path.normpath(os.path.join(cwd,'logs',file_name)) os.makedirs(os.path.dirname(hm_log), exist_ok=True) except Exception as e: print("Log path error. Error Msg: \n",e) logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG) log = logging.getLogger('AION') log.info('Server regression encryption service started...') print("Encrypted client data is ....\n") log.info("Encrypted client data is (received by server): \n"+str(encrypted_clientdata)) print("Client side encrypted data: \n",encrypted_clientdata) enc_predictions = cls_obj.ppxgboostpredict_server(model,ppBoostKey,encrypted_clientdata,min_max) log.info("server side encrypted prediction: \n"+str(enc_predictions)) ## Serialize the ppboost encrypted prediction with jsonpickle; the standard pickle lib does not work here. enc_predictions_json = jsonpickle.encode(enc_predictions) # enc_predictions_json = enc_predictions.to_json() return enc_predictions_json if __name__ == '__main__': #app.run(debug=True) app.run(host="localhost", port=9000, debug=True)
heMulticlass.py
# -*- coding: utf-8 -*- """ Created on Sat Sep 10 23:57:56 2022 """ import numpy as np import sqlite3 import sys import pandas as pd from secrets import token_bytes from ppxgboost import PaillierAPI as paillier from ppxgboost import BoosterParser as boostparser from ppxgboost import PPBooster as ppbooster from ppxgboost.PPBooster import MetaData from ppxgboost.PPKey import PPBoostKey # from ope.pyope.ope import OPE from pyope.ope import OPE import joblib import logging import os from flask import Flask,request,jsonify,render_template # import pickle from flask import Flask, request, jsonify import json import jsonpickle import time from pathlib import Path app = Flask(__name__) class server_ppxgboost: def __init__(self): # self.problemtype=problemtype print("Inside server_ppxgboost_1,init\n") self.confdata=None ## Loading config file def configload(self): cwd = os.path.abspath(os.path.dirname(__file__)) file_name='secure_config.json' try: config_file=os.path.normpath(os.path.join(cwd,'etc',file_name)) except Exception as e: print("config path error. Error Msg: \n",e) with open(config_file, 'r') as file: data = json.load(file) model_name=str(data["model_name"]) # version=str(data["version"]) return model_name ## Load server xgboost model from ../model dir. def model_load( self,path): cwd = os.path.abspath(os.path.dirname(__file__)) file_name=path try: model_location=os.path.normpath(os.path.join(cwd,'model',file_name)) except Exception as e: print("Model path error. Error Msg: \n",e) # print(path) loaded_model = joblib.load(model_location) return loaded_model def create_connection(self,db_file): conn = None try: conn = sqlite3.connect(db_file) conn.execute('''CREATE TABLE IF NOT EXISTS modelinfo (key BLOB NOT NULL,encrypttree BLOB NOT NULL)''') except Exception as e: print(e) return conn def count_encrypt_model(self,conn): try: sql = "select count(*) from modelinfo" cur = conn.cursor() cur.execute(sql) cur_result = cur.fetchone() return cur_result[0] except Exception as e: print(e) def create_encryptmodel(self,conn,modeldetails): sql = ''' INSERT INTO modelinfo(key,encrypttree) VALUES(?,?) 
''' cur = conn.cursor() cur.execute(sql, modeldetails) conn.commit() return cur.lastrowid def search_encryptmodel(self,conn,key): try: sql = "SELECT encrypttree FROM modelinfo WHERE key=?" cursor = conn.execute(sql, (key,)) for row in cursor: return row[0] return '' except Exception as e: print(e) def ppxgboostpredict_server(self,model,ppBoostKey,clientdata,min_max,noofclasses): try: db_file = Path(__file__).parent/'logs'/'encryptedModels' conn = self.create_connection(db_file) enc_trees = self.search_encryptmodel(conn,jsonpickle.encode(ppBoostKey)) if enc_trees != '': enc_trees = jsonpickle.decode(enc_trees) else: if self.count_encrypt_model(conn) >= 5: outputjson = {"status":"ERROR","msg":"Maximum number of encrypted models reached"} return json.dumps(outputjson) xgboost_model = model p_trees, feature_set, min_max = boostparser.model_to_trees(xgboost_model, min_max) enc_trees = ppbooster.enc_xgboost_model(ppBoostKey, p_trees, MetaData(min_max)) modelinfo = (jsonpickle.encode(ppBoostKey),jsonpickle.encode(enc_trees)) self.create_encryptmodel(conn,modelinfo) enc_client_data=clientdata # try: # num_classes = model.n_classes_ # except: # num_classes = noofclasses num_classes = noofclasses if num_classes == 0: enc_predictions = ppbooster.predict_binary(enc_trees, enc_client_data) else: enc_predictions = ppbooster.predict_multiclass(enc_trees, num_classes, enc_client_data) enc_predictions_json = jsonpickle.encode(enc_predictions) outputjson = {"status":"SUCCESS","data":enc_predictions_json} return json.dumps(outputjson) except Exception as e: outputjson = {"status":"ERROR","msg":str(e)} return json.dumps(outputjson) ## XGBoost wrapper for native model (native model to scikit learn xgboost model) def xgboostwrappertonative(self,wrappermodel): nativemodel = wrappermodel.get_booster() try: noOfClasses = wrappermodel.n_classes_ except Exception as e: print(e) noOfClasses = 0 return nativemodel,noOfClasses def training_dataset_parser(self,train_data: pd.DataFrame): """ :param train_data: dataframe training data :return: minimum of the training dataset, and maximum of the training dataset. """ return {'min': np.min(pd.DataFrame.min(train_data)), 'max': np.max(pd.DataFrame.max(train_data))} ## Homomorphic secure main server cls_obj=server_ppxgboost() def spredict(data): try: json_in= json.loads(data) values = json_in['values'] features=json_in['features'] ppBoostKey=jsonpickle.decode(json_in['key']) encrypted_clientdata=pd.DataFrame(values,columns =features) model=None min_max = {'min': 0, 'max': 1000} model_name = cls_obj.configload() model_location=model_name model_xgboost = cls_obj.model_load(model_location) try: model,noofclasses = cls_obj.xgboostwrappertonative(model_xgboost) except Exception as e: print(e) model= model_xgboost noofclasses = 0 cwd = os.path.abspath(os.path.dirname(__file__)) # model_name=model_name file_name = model_name.rsplit('.', 1) file_name=file_name[0] file_name=file_name+".log" try: hm_log=os.path.normpath(os.path.join(cwd,'logs',file_name)) os.makedirs(os.path.dirname(hm_log), exist_ok=True) except Exception as e: print("Log path error. Error Msg: \n",e) logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG) log = logging.getLogger('AION') log.info('Server multiclass classification encryption service started...') log.info("Encrypted client data is (received by server): \n"+str(encrypted_clientdata)) output = cls_obj.ppxgboostpredict_server(model,ppBoostKey,encrypted_clientdata,min_max,noofclasses) print("predictions:",output) print("Inside server_ppxgboost_8,output= \n",output) return output except Exception as e: outputjson = {"status":"ERROR","msg":str(e)} output = json.dumps(outputjson) print("predictions:",output) return output if __name__ == "__main__": output = spredict(sys.argv[1])
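Unlike the Flask-based variants, this multiclass service is driven through spredict(sys.argv[1]). A sketch of invoking it as a subprocess, with a placeholder payload:

import json
import subprocess
import sys

payload = json.dumps({
    'values': [[101, 202]],                    # placeholder OPE-encrypted cells
    'features': ['f1', 'f2'],                  # placeholder column names
    'key': '<jsonpickle-encoded PPBoostKey>',  # produced by the client scripts below
})
out = subprocess.check_output([sys.executable, 'heMulticlass.py', payload])
print(out.decode('utf-8'))  # ends with 'predictions:' plus a JSON status envelope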
aion_hebinaryclient.py
# -*- coding: utf-8 -*- import pandas as pd import numpy as np import logging import os import sys from logging import INFO from script.heBinary import client_ppxgboost from script.aion_predict import selector from script.inputprofiler import inputprofiler ## Client main class for binary classification class aion_hebinaryclient: def __init__(self): self.confdata=None def configLoad(self,jsonfile): import json jsonfile=str(jsonfile) with open(jsonfile, 'r') as file: self.confdata = json.load(file) return self.confdata def dataload(self,datapath): df = pd.read_csv(datapath) ## Data preprocessing of the test dataset is handled by the AION profiler. # df =df[~df.isin([np.nan, np.inf, -np.inf]).any(1)] # df=df.reset_index(drop=True) profilerobj = inputprofiler() df = profilerobj.run(df) selectobj = selector() df = selectobj.run(df) return df ## Standalone client entry point if __name__ == "__main__": classobj=aion_hebinaryclient() data_location=str(sys.argv[1]) df=classobj.dataload(data_location) # print("df: \n",df) aionhmcobj=client_ppxgboost(df) ppxgboost_pred=aionhmcobj.main_client() print("final decrypted prediction at client side: \n",ppxgboost_pred) # classobj.log.info("At client end, homomorphic prediction df: \n"+str(ppxgboost_pred)) # classobj.log.info("Aion homomorphic client encrypted prediction df: \n"+str(ppxgboost_pred))
aion_hemulticlient.py
# -*- coding: utf-8 -*- import pandas as pd import numpy as np import logging import os import sys from logging import INFO from script.heMulticlass import client_ppxgboost from script.aion_predict import selector from script.inputprofiler import inputprofiler import argparse class aion_hemulticlient: def __init__(self): self.confdata=None def dataload(self,datapath): df = pd.read_csv(datapath) ## Data preprocessing of the test dataset is handled by the AION profiler. # df =df[~df.isin([np.nan, np.inf, -np.inf]).any(1)] # df=df.reset_index(drop=True) profilerobj = inputprofiler() df = profilerobj.run(df) selectobj = selector() df = selectobj.run(df) return df if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-i', '--inputDataLocation', help='Input Data Path') parser.add_argument('-k', '--keyGenerate', help='True') parser.add_argument('-e', '--endPoint', help='Service End Point') args = parser.parse_args() if args.inputDataLocation: dataLocation=args.inputDataLocation else: raise ValueError('Input Data Location Not Defined') if args.keyGenerate: keyGenerate = args.keyGenerate else: keyGenerate='False' print(keyGenerate) if args.endPoint: endPoint=args.endPoint else: raise ValueError('End Point Not Defined') classobj=aion_hemulticlient() df=classobj.dataload(dataLocation) aionhmcobj=client_ppxgboost(df,keyGenerate,endPoint) ppxgboost_pred=aionhmcobj.main_client() print("final decrypted prediction at client side: \n",ppxgboost_pred)
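A minimal driver for the client above, assuming a compatible HE serving endpoint is already running; the data path is a placeholder.

import subprocess
import sys

subprocess.run([sys.executable, 'aion_hemulticlient.py',
                '-i', 'testdata.csv',   # placeholder input CSV
                '-k', 'True',           # force fresh key generation
                '-e', 'http://localhost:9000/homomorphicprediction_server_api'],
               check=True)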
aion_heregressionclient.py
# -*- coding: utf-8 -*- import pandas as pd import numpy as np import logging import os import sys from logging import INFO from script.heRegression import client_ppxgboost from script.aion_predict import selector from script.inputprofiler import inputprofiler import argparse class aion_heregressionclient: def __init__(self): self.confdata=None def dataload(self,datapath): df = pd.read_csv(datapath) ## Data preprocessing of the test dataset is handled by the AION profiler. # df =df[~df.isin([np.nan, np.inf, -np.inf]).any(1)] # df=df.reset_index(drop=True) profilerobj = inputprofiler() df = profilerobj.run(df) selectobj = selector() df = selectobj.run(df) return df if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-i', '--inputDataLocation', help='Input Data Path') parser.add_argument('-k', '--keyGenerate', help='True') parser.add_argument('-e', '--endPoint', help='Service End Point') args = parser.parse_args() if args.inputDataLocation: dataLocation=args.inputDataLocation else: raise ValueError('Input Data Location Not Defined') if args.keyGenerate: keyGenerate = args.keyGenerate else: keyGenerate='False' print(keyGenerate) if args.endPoint: endPoint=args.endPoint else: raise ValueError('End Point Not Defined') classobj=aion_heregressionclient() df=classobj.dataload(dataLocation) aionhmcobj=client_ppxgboost(df,keyGenerate,endPoint) ppxgboost_pred=aionhmcobj.main_client() print("final decrypted prediction at client side: \n",ppxgboost_pred)
heBinary.py
# -*- coding: utf-8 -*- import pandas as pd from sklearn.model_selection import train_test_split import numpy as np from secrets import token_bytes from ppxgboost import PaillierAPI as paillier from ppxgboost import BoosterParser as boostparser from ppxgboost import PPBooster as ppbooster from ppxgboost.PPBooster import MetaData from ppxgboost.PPKey import PPBoostKey # from ope.pyope.ope import OPE from pyope.ope import OPE import sys sys.path.insert(0, '..') import logging from logging import INFO import pickle import requests import json # from json import JSONEncoder import jsonpickle import os ##Aion main client class for ppxgboost based encryption,decryption class client_ppxgboost: def __init__(self,data): self.data=data self.prediction=None ## For logging cwd = os.path.abspath(os.path.dirname(__file__)) # model_name=model_name file_name = "he_binaryclass" file_name=file_name+".log" try: hm_log=os.path.normpath(os.path.join(cwd,'logs',file_name)) os.makedirs(os.path.dirname(hm_log), exist_ok=True) except Exception as e: print("Log path error. Error Msg: \n",e) logging.basicConfig(filename=hm_log,format='%(asctime)s %(message)s', filemode='w',level=logging.DEBUG) self.log = logging.getLogger('AION') self.log.info('Client binary class classification homomorphic encryption service started...') ## Loading configuration parameters, Not used now. def configload(self): try: data=self.confdata usecase_name=str(data["usecase_name"]) version=str(data["version"]) problem_type=data["problem_type"] model_location=str(data["model_location"]) data_location=str(data["data_location"]) selected_feature=data["selected_feature"] if (type(selected_feature) is str): selected_feature=selected_feature.split(',') target_feature=data["target_feature"] client_encryption_accuracy=str(data["client_encryption_accuracy"]) test_size=int(data["test_size"]) test_size=test_size/100 except Exception as e: self.log.info("Reading server config file issue. Err.Msg: %s "+str(e)) return usecase_name,data_location,model_location,problem_type,version,selected_feature,target_feature,client_encryption_accuracy,test_size ## Load the model, Not used at client now. def model_load(self, path): loaded_model = pickle.load(open(path, 'rb')) return loaded_model #Generating secure key def generate_ppboostkey(self): public_key, private_key = paillier.he_key_gen() prf_key = token_bytes(16) OPE_key = token_bytes(16) encrypter = OPE(OPE_key) ppBoostKey = PPBoostKey(public_key, prf_key, encrypter) return public_key,private_key,prf_key,OPE_key,encrypter,ppBoostKey ## Binary client prediction (decrypted prediction) def ppxgboostbinary_predict(self,enc_predictions,private_key): dec = ppbooster.client_decrypt(private_key, enc_predictions) ##For binary classification res = ppbooster.client_decrypt_prediction_binary(private_key, enc_predictions) res_df=pd.DataFrame(res) return res # class ppkeyEncoder(JSONEncoder): # def default(self,o): # return o.__dict__ ## Function to connect secure server via flask restapi (send enc data and receive enc prediction.) 
def connect_xgboostserver(self,ppBoostKey,encrypted_xtest): url = 'http://localhost:9000/homomorphicprediction_server_api' enc_dict={} # df_list=[encrypted_xtest.columns.values.tolist()]+df.values.tolist() enc_dict['values']=encrypted_xtest.values.tolist() enc_dict['features']=encrypted_xtest.columns.values.tolist() enc_dict['key']= jsonpickle.encode(ppBoostKey) json_out=json.dumps(enc_dict,indent=4) headers = { 'content-type': "application/json", 'cache-control': "no-cache" } r = requests.post(url,data=json_out,headers=headers) enc_predictions_obj=jsonpickle.decode(r.content) return enc_predictions_obj ## Create PaillierAPI based encrypted user given data; here, testdata=userdata def generate_encrypted_testdata(self,prf_key,encrypter,testdata,min_max): feature_set_testdata=set(testdata.columns) ppbooster.enc_input_vector(prf_key, encrypter, feature_set_testdata, testdata, MetaData(min_max)) return testdata ## Compute min and max of the testdata df for Paillier encryption/decryption def training_dataset_parser(self, client_data: pd.DataFrame): """ :param client_data: dataframe training data :return: minimum of the training dataset, and maximum of the training dataset. """ return {'min': np.min(pd.DataFrame.min(client_data)), 'max': np.max(pd.DataFrame.max(client_data))} ## Main client flow: encrypt data, send it to the server, receive the encrypted prediction, and decrypt it def main_client(self): self.log.info('Client actual data sample (displaying last 10 values) : \n'+str(self.data.tail(10))) print(" Client actual data sample (displaying last 10 values) : \n",self.data.tail(10)) public_key,private_key,prf_key,OPE_key,encrypter,ppBoostKey = self.generate_ppboostkey() min_max = self.training_dataset_parser(self.data) meta_min_max = MetaData(min_max) encrypted_testdata = self.generate_encrypted_testdata(prf_key,encrypter,self.data,min_max) # print("Sending encrypted client data to server....\n") print("\n Client side encrypted input data to server (displaying last 10 rows): \n",encrypted_testdata.tail(10)) self.log.info('Client side encrypted input data to server (displaying last 10 rows): \n'+str(encrypted_testdata.tail(10))) enc_predictions = self.connect_xgboostserver(ppBoostKey,encrypted_testdata) print("\n Encrypted prediction from server (displaying last 10 values.): \n",enc_predictions[-10:]) self.log.info('\n Encrypted prediction from server (displaying last 10 values.): \n'+str(enc_predictions[-10:])) ## Decrypted prediction dec = self.ppxgboostbinary_predict(enc_predictions,private_key) # ppxgboost_pred=pd.DataFrame(list(zip(dec, predictions)),columns =['homomorphic_prediction', 'actual_prediction']) ppxgboost_pred=pd.DataFrame(dec,columns =['homomorphic_prediction']) self.log.info("final decrypted prediction at client side:: \n"+str(ppxgboost_pred)) return ppxgboost_pred ## For standalone testing if __name__ == '__main__': data=None ppxgboost_client_obj=client_ppxgboost(data) ppxgboost_dec_predictions = ppxgboost_client_obj.main_client() # print("In main: ppxgboost_dec_predictions: \n",ppxgboost_dec_predictions)
heRegression.py
# -*- coding: utf-8 -*-
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from secrets import token_bytes
from ppxgboost import PaillierAPI as paillier
from ppxgboost import BoosterParser as boostparser
from ppxgboost import PPBooster as ppbooster
from ppxgboost.PPBooster import MetaData
from ppxgboost.PPKey import PPBoostKey
from pyope.ope import OPE
import sys
sys.path.insert(0, '..')
import logging
from logging import INFO
import pickle
import requests
import json
import jsonpickle
import os
from pathlib import Path

## AION main client class for ppxgboost-based encryption and decryption
class client_ppxgboost:
    def __init__(self, data, keyGenerate, endPoint):
        self.data = data
        self.keyGenerate = keyGenerate
        self.endPoint = endPoint
        self.prediction = None
        ## For logging
        clientDirectory = os.path.abspath(os.path.dirname(__file__))
        file_name = "he_regression.log"
        self.keydir = os.path.join(clientDirectory, '..', 'keys')
        os.makedirs(self.keydir, exist_ok=True)
        try:
            hm_log = os.path.normpath(os.path.join(clientDirectory, 'logs', file_name))
            os.makedirs(os.path.dirname(hm_log), exist_ok=True)
        except Exception as e:
            print("Log path error. Error Msg: \n", e)
        logging.basicConfig(filename=hm_log, format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)
        self.log = logging.getLogger('AION')
        self.log.info('Client Regression homomorphic encryption service started...')

    ## Load configuration parameters; not used now.
    def configload(self):
        try:
            data = self.confdata
            usecase_name = str(data["usecase_name"])
            version = str(data["version"])
            problem_type = data["problem_type"]
            model_location = str(data["model_location"])
            data_location = str(data["data_location"])
            selected_feature = data["selected_feature"]
            if type(selected_feature) is str:
                selected_feature = selected_feature.split(',')
            target_feature = data["target_feature"]
            client_encryption_accuracy = str(data["client_encryption_accuracy"])
            test_size = int(data["test_size"]) / 100
        except Exception as e:
            self.log.info("Reading server config file issue. Err.Msg: " + str(e))
        return usecase_name, data_location, model_location, problem_type, version, selected_feature, target_feature, client_encryption_accuracy, test_size

    ## Load the model; not used at the client now.
    def model_load(self, path):
        loaded_model = pickle.load(open(path, 'rb'))
        return loaded_model

    ## Generate the secure keys, or reload them if they already exist on disk
    def generate_ppboostkey(self):
        try:
            keys_dir = Path(__file__).parent.parent / 'keys'
            public_key_file = keys_dir / 'public.k'
            private_key_file = keys_dir / 'private.k'
            prf_key_file = keys_dir / 'prf.k'
            ope_key_file = keys_dir / 'ope.k'
            encryptor_key_file = keys_dir / 'encryptor.k'
            boostkey_key_file = keys_dir / 'boostkey.k'
            if not boostkey_key_file.exists() or self.keyGenerate == 'True':
                public_key, private_key = paillier.he_key_gen()
                prf_key = token_bytes(16)
                OPE_key = token_bytes(16)
                encrypter = OPE(OPE_key)
                ppBoostKey = PPBoostKey(public_key, prf_key, encrypter)
                # persist every key object so a later run can reuse the same keys
                for key_file, key_obj in ((public_key_file, public_key), (private_key_file, private_key),
                                          (prf_key_file, prf_key), (ope_key_file, OPE_key),
                                          (encryptor_key_file, encrypter), (boostkey_key_file, ppBoostKey)):
                    with open(key_file, 'w') as f:
                        f.write(jsonpickle.encode(key_obj))
            else:
                with open(public_key_file, 'r') as f:
                    public_key = jsonpickle.decode(f.read())
                with open(private_key_file, 'r') as f:
                    private_key = jsonpickle.decode(f.read())
                with open(prf_key_file, 'r') as f:
                    prf_key = jsonpickle.decode(f.read())
                with open(ope_key_file, 'r') as f:
                    OPE_key = jsonpickle.decode(f.read())
                with open(encryptor_key_file, 'r') as f:
                    encrypter = jsonpickle.decode(f.read())
                with open(boostkey_key_file, 'r') as f:
                    ppBoostKey = jsonpickle.decode(f.read())
            return public_key, private_key, prf_key, OPE_key, encrypter, ppBoostKey
        except Exception as e:
            print(e)

    ## Decrypt regression predictions with the Paillier private key
    def ppxgboostreg_predict(self, enc_predictions, private_key):
        dec = []
        for p in enc_predictions:
            dec.append(paillier.decrypt(private_key, p))
        return dec

    ## Connect to the secure server over its Flask REST API: send encrypted data, receive the encrypted prediction
    def connect_xgboostserver(self, ppBoostKey, encrypted_xtest):
        url = self.endPoint
        enc_dict = {}
        enc_dict['values'] = encrypted_xtest.values.tolist()
        enc_dict['features'] = encrypted_xtest.columns.values.tolist()
        enc_dict['key'] = jsonpickle.encode(ppBoostKey)
        json_out = json.dumps(enc_dict, indent=4)
        headers = {
            'content-type': "application/json",
            'cache-control': "no-cache"
        }
        response = requests.post(url, auth=('admin', 'aion'), data=json_out, headers=headers)
        outputStr = response.content.decode('utf-8').strip()
        predict_dict = json.loads(outputStr)
        if predict_dict['status'] == 'SUCCESS':
            return jsonpickle.decode(predict_dict['data'])
        else:
            print('Error')

    ## Encrypt the user-given data (here, testdata = userdata) with the PaillierAPI-based input-vector encryption
    def generate_encrypted_testdata(self, prf_key, encrypter, testdata, min_max):
        feature_set_testdata = set(testdata.columns)
        ppbooster.enc_input_vector(prf_key, encrypter, feature_set_testdata, testdata, MetaData(min_max))
        return testdata

    ## Compute the min and max of the dataframe, needed for Paillier encryption/decryption
    def training_dataset_parser(self, client_data: pd.DataFrame):
        """
        :param client_data: dataframe of training data
        :return: minimum and maximum of the training dataset.
        """
        return {'min': np.min(pd.DataFrame.min(client_data)), 'max': np.max(pd.DataFrame.max(client_data))}

    ## Main client flow: encrypt the data, send it to the server, receive the encrypted prediction and decrypt it
    def main_client(self):
        self.log.info('Client actual data sample (displaying last 10 values): \n' + str(self.data.tail(10)))
        print(" Client actual data sample (displaying last 10 values): \n", self.data.tail(10))
        public_key, private_key, prf_key, OPE_key, encrypter, ppBoostKey = self.generate_ppboostkey()
        min_max = self.training_dataset_parser(self.data)
        meta_min_max = MetaData(min_max)
        encrypted_testdata = self.generate_encrypted_testdata(prf_key, encrypter, self.data, min_max)
        print("\n Client side encrypted input data to server (displaying last 10 rows): \n", encrypted_testdata.tail(10))
        self.log.info('Client side encrypted input data to server (displaying last 10 rows): \n' + str(encrypted_testdata.tail(10)))
        enc_predictions = self.connect_xgboostserver(ppBoostKey, encrypted_testdata)
        print("\n Encrypted prediction from server (displaying last 10 values): \n", enc_predictions[-10:])
        self.log.info('\n Encrypted prediction from server (displaying last 10 values): \n' + str(enc_predictions[-10:]))
        ## Decrypt the prediction
        dec = self.ppxgboostreg_predict(enc_predictions, private_key)
        ppxgboost_pred = pd.DataFrame(dec, columns=['homomorphic_prediction'])
        self.log.info("Final decrypted prediction at client side: \n" + str(ppxgboost_pred))
        return ppxgboost_pred

## For standalone testing; real runs must supply a dataframe and a server endpoint
if __name__ == '__main__':
    data = None            # pandas DataFrame of input features
    keyGenerate = 'False'  # 'True' regenerates all key material
    endPoint = None        # URL of the homomorphic prediction server
    ppxgboost_client_obj = client_ppxgboost(data, keyGenerate, endPoint)
    ppxgboost_dec_predictions = ppxgboost_client_obj.main_client()
    print("In main: ppxgboost_dec_predictions: \n", ppxgboost_dec_predictions)
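## Minimal usage sketch for this client. The CSV path and endpoint below are
## placeholders (assumptions), not values shipped with this file:
def _example_regression_client(csv_path, endpoint):
    import pandas as pd
    df = pd.read_csv(csv_path)                        # plaintext feature rows to score
    client = client_ppxgboost(df, 'False', endpoint)  # 'True' would regenerate the keys
    return client.main_client()                       # DataFrame with 'homomorphic_prediction'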
heMulticlass.py
# -*- coding: utf-8 -*-
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from secrets import token_bytes
from ppxgboost import PaillierAPI as paillier
from ppxgboost import BoosterParser as boostparser
from ppxgboost import PPBooster as ppbooster
from ppxgboost.PPBooster import MetaData
from ppxgboost.PPKey import PPBoostKey
from pyope.ope import OPE
import sys
sys.path.insert(0, '..')
import logging
from logging import INFO
import pickle
import requests
import json
import jsonpickle
import os
from pathlib import Path

## AION main client class for ppxgboost-based encryption and decryption
class client_ppxgboost:
    def __init__(self, data, keyGenerate, endPoint):
        self.data = data
        self.keyGenerate = keyGenerate
        self.endPoint = endPoint
        self.prediction = None
        ## For logging
        clientDirectory = os.path.abspath(os.path.dirname(__file__))
        file_name = "he_multiclass.log"
        self.keydir = os.path.join(clientDirectory, '..', 'keys')
        os.makedirs(self.keydir, exist_ok=True)
        try:
            hm_log = os.path.normpath(os.path.join(clientDirectory, 'logs', file_name))
            os.makedirs(os.path.dirname(hm_log), exist_ok=True)
        except Exception as e:
            print("Log path error. Error Msg: \n", e)
        logging.basicConfig(filename=hm_log, format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)
        self.log = logging.getLogger('AION')
        self.log.info('Client Multi class classification homomorphic encryption service started...')

    ## Load configuration parameters; not used now.
    def configload(self):
        try:
            data = self.confdata
            usecase_name = str(data["usecase_name"])
            version = str(data["version"])
            problem_type = data["problem_type"]
            model_location = str(data["model_location"])
            data_location = str(data["data_location"])
            selected_feature = data["selected_feature"]
            if type(selected_feature) is str:
                selected_feature = selected_feature.split(',')
            target_feature = data["target_feature"]
            client_encryption_accuracy = str(data["client_encryption_accuracy"])
            test_size = int(data["test_size"]) / 100
        except Exception as e:
            self.log.info("Reading server config file issue. Err.Msg: " + str(e))
        return usecase_name, data_location, model_location, problem_type, version, selected_feature, target_feature, client_encryption_accuracy, test_size

    ## Load the model; not used at the client now.
    def model_load(self, path):
        loaded_model = pickle.load(open(path, 'rb'))
        return loaded_model

    ## Generate the secure keys, or reload them if they already exist on disk
    def generate_ppboostkey(self):
        try:
            keys_dir = Path(__file__).parent.parent / 'keys'
            public_key_file = keys_dir / 'public.k'
            private_key_file = keys_dir / 'private.k'
            prf_key_file = keys_dir / 'prf.k'
            ope_key_file = keys_dir / 'ope.k'
            encryptor_key_file = keys_dir / 'encryptor.k'
            boostkey_key_file = keys_dir / 'boostkey.k'
            if not boostkey_key_file.exists() or self.keyGenerate == 'True':
                public_key, private_key = paillier.he_key_gen()
                prf_key = token_bytes(16)
                OPE_key = token_bytes(16)
                encrypter = OPE(OPE_key)
                ppBoostKey = PPBoostKey(public_key, prf_key, encrypter)
                # persist every key object so a later run can reuse the same keys
                for key_file, key_obj in ((public_key_file, public_key), (private_key_file, private_key),
                                          (prf_key_file, prf_key), (ope_key_file, OPE_key),
                                          (encryptor_key_file, encrypter), (boostkey_key_file, ppBoostKey)):
                    with open(key_file, 'w') as f:
                        f.write(jsonpickle.encode(key_obj))
            else:
                with open(public_key_file, 'r') as f:
                    public_key = jsonpickle.decode(f.read())
                with open(private_key_file, 'r') as f:
                    private_key = jsonpickle.decode(f.read())
                with open(prf_key_file, 'r') as f:
                    prf_key = jsonpickle.decode(f.read())
                with open(ope_key_file, 'r') as f:
                    OPE_key = jsonpickle.decode(f.read())
                with open(encryptor_key_file, 'r') as f:
                    encrypter = jsonpickle.decode(f.read())
                with open(boostkey_key_file, 'r') as f:
                    ppBoostKey = jsonpickle.decode(f.read())
            return public_key, private_key, prf_key, OPE_key, encrypter, ppBoostKey
        except Exception as e:
            print(e)

    ## Decrypt multiclass predictions; for binary classification,
    ## ppbooster.client_decrypt_prediction_binary would be used instead
    def ppxgboostmulticlass_predict(self, enc_predictions, private_key):
        res = ppbooster.client_decrypt_prediction_multiclass(private_key, enc_predictions)
        return res

    ## Connect to the secure server over its Flask REST API: send encrypted data, receive the encrypted prediction
    def connect_xgboostserver(self, ppBoostKey, encrypted_xtest):
        url = self.endPoint
        enc_dict = {}
        enc_dict['values'] = encrypted_xtest.values.tolist()
        enc_dict['features'] = encrypted_xtest.columns.values.tolist()
        enc_dict['key'] = jsonpickle.encode(ppBoostKey)
        json_out = json.dumps(enc_dict, indent=4)
        headers = {
            'content-type': "application/json",
            'cache-control': "no-cache"
        }
        response = requests.post(url, auth=('admin', 'aion'), data=json_out, headers=headers)
        outputStr = response.content.decode('utf-8').strip()
        predict_dict = json.loads(outputStr)
        if predict_dict['status'] == 'SUCCESS':
            return jsonpickle.decode(predict_dict['data'])
        else:
            print('Error')

    ## Encrypt the user-given data (here, testdata = userdata) with the PaillierAPI-based input-vector encryption
    def generate_encrypted_testdata(self, prf_key, encrypter, testdata, min_max):
        feature_set_testdata = set(testdata.columns)
        ppbooster.enc_input_vector(prf_key, encrypter, feature_set_testdata, testdata, MetaData(min_max))
        return testdata

    ## Compute the min and max of the dataframe, needed for Paillier encryption/decryption
    def training_dataset_parser(self, client_data: pd.DataFrame):
        """
        :param client_data: dataframe of training data
        :return: minimum and maximum of the training dataset.
        """
        return {'min': np.min(pd.DataFrame.min(client_data)), 'max': np.max(pd.DataFrame.max(client_data))}

    ## Main client flow: encrypt the data, send it to the server, receive the encrypted prediction and decrypt it
    def main_client(self):
        self.log.info('Client actual data sample (displaying last 10 values): \n' + str(self.data.tail(10)))
        public_key, private_key, prf_key, OPE_key, encrypter, ppBoostKey = self.generate_ppboostkey()
        min_max = self.training_dataset_parser(self.data)
        meta_min_max = MetaData(min_max)
        encrypted_testdata = self.generate_encrypted_testdata(prf_key, encrypter, self.data, min_max)
        self.log.info('Client side encrypted input data to server (displaying last 10 rows): \n' + str(encrypted_testdata.tail(10)))
        enc_predictions = self.connect_xgboostserver(ppBoostKey, encrypted_testdata)
        ## Decrypt the prediction
        dec = self.ppxgboostmulticlass_predict(enc_predictions, private_key)
        ppxgboost_pred = pd.DataFrame(dec, columns=['homomorphic_prediction'])
        self.log.info("Final decrypted prediction at client side: \n" + str(ppxgboost_pred))
        return ppxgboost_pred

## For standalone testing; real runs must supply a dataframe and a server endpoint
if __name__ == '__main__':
    data = None            # pandas DataFrame of input features
    keyGenerate = 'False'  # 'True' regenerates all key material
    endPoint = None        # URL of the homomorphic prediction server
    ppxgboost_client_obj = client_ppxgboost(data, keyGenerate, endPoint)
    ppxgboost_dec_predictions = ppxgboost_client_obj.main_client()
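## Key-handling sketch: generate_ppboostkey() persists all key material under
## ../keys and returns it as a 6-tuple, so a later run with keyGenerate='False'
## reloads the same keys. This assumes jsonpickle round-trips the byte keys faithfully:
def _example_key_roundtrip():
    first = client_ppxgboost(None, 'True', None).generate_ppboostkey()    # fresh keys, written to disk
    second = client_ppxgboost(None, 'False', None).generate_ppboostkey()  # keys reloaded from disk
    assert second[2] == first[2]  # the PRF key read back matches the one written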
__init__.py
'''
 *
 * =============================================================================
 * COPYRIGHT NOTICE
 * =============================================================================
 * @ Copyright HCL Technologies Ltd. 2021, 2022,2023
 * Proprietary and confidential. All information contained herein is, and
 * remains the property of HCL Technologies Limited. Copying or reproducing the
 * contents of this file, via any medium is strictly prohibited unless prior
 * written permission is obtained from HCL Technologies Limited.
 *
'''
explainable_ai.py
import matplotlib.pyplot as plt
try:
    from sklearn.externals import joblib
except:
    import joblib
import os, sys
import pandas as pd
from alibi.explainers import ALE, plot_ale
import io
import json
import urllib, base64
import numpy as np
from scipy.stats import linregress
from statistics import mean
from script.inputprofiler import inputprofiler

def preprocessing(data):
    profilerobj = inputprofiler()
    data = profilerobj.run(data)
    data = data.astype(np.float64)
    return data

def get_ranked_values(explanation):
    ranked_shap_vals = []
    for cls_idx in range(len(explanation.shap_values)):
        this_ranking = (
            explanation.raw['importances'][str(cls_idx)]['ranked_effect'],
            explanation.raw['importances'][str(cls_idx)]['names']
        )
        ranked_shap_vals.append(this_ranking)
    return ranked_shap_vals

def feature_importance_using_shap(model, X, featuresNames, classes, x_test, x_test_waterfall):
    from alibi.explainers import KernelShap
    import shap
    shap.initjs()
    # pick the best available prediction function for KernelShap
    if hasattr(model, "decision_function"):
        pred_fcn = model.decision_function
    elif hasattr(model, "predict_proba"):
        pred_fcn = model.predict_proba
    else:
        pred_fcn = model.predict
    try:
        svm_explainer = KernelShap(pred_fcn, feature_names=featuresNames)
        xtest = x_test[0].reshape(1, -1)
        svm_explainer.fit(X, n_background_samples=100)
        svm_explanation = svm_explainer.explain(xtest)
        try:
            idx = 0
            instance = x_test[0][None, :]
            pred = model.predict(instance)
            class_idx = pred.item()
            if isinstance(svm_explainer.expected_value, np.ndarray):
                forceplot = shap.force_plot(svm_explainer.expected_value[class_idx], svm_explanation.shap_values[class_idx][idx, :], instance, feature_names=featuresNames, matplotlib=True, show=False)
            else:
                forceplot = shap.force_plot(svm_explainer.expected_value, svm_explanation.shap_values[0][idx, :], instance, feature_names=featuresNames, matplotlib=True, show=False)
            plt.tight_layout(pad=0)
            image = io.BytesIO()
            plt.savefig(image, format='png')
            image.seek(0)
            string = base64.b64encode(image.read())
            image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
        except Exception as inst:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
            image_64 = ''
        try:
            plt.clf()
            if isinstance(svm_explainer.expected_value, np.ndarray):
                r = shap.multioutput_decision_plot(svm_explainer.expected_value.tolist(), svm_explanation.shap_values, idx, feature_names=featuresNames, feature_order='importance', highlight=[class_idx], legend_labels=classes, return_objects=True, legend_location='lower right', show=False)
            else:
                expectedvalues = [svm_explainer.expected_value]
                r = shap.multioutput_decision_plot(expectedvalues, svm_explanation.shap_values, idx, feature_names=featuresNames, highlight=[0], return_objects=True, legend_labels=['Value'], feature_order='importance', show=False)
            plt.tight_layout(pad=0)
            image = io.BytesIO()
            plt.savefig(image, format='png')
            image.seek(0)
            string = base64.b64encode(image.read())
            image2_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
        except Exception as inst:
            print(inst)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
            image2_64 = ''
    except Exception as inst:
        print(inst)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
        image2_64 = ''
        image_64 = ''
    try:
        plt.clf()
        x_test_waterfall = x_test_waterfall[featuresNames]
        explainer = shap.Explainer(model.predict, x_test_waterfall, feature_names=featuresNames)
        shap_values = explainer(x_test)
        r = shap.plots.waterfall(shap_values[0], show=False)
        image = io.BytesIO()
        plt.savefig(image, format='png', bbox_inches='tight')
        image.seek(0)
        string = base64.b64encode(image.read())
        image3_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
    except Exception as inst:
        print(inst)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
        image3_64 = ''
    return (image_64, image2_64, image3_64)

def feature_importance(xtrain, ytrain, xfeatures, yfeature, problemType):
    if problemType == 'classification':
        from sklearn.feature_selection import SelectFromModel
        from sklearn.ensemble import ExtraTreesClassifier
        selector = SelectFromModel(ExtraTreesClassifier())
        selector.fit(xtrain, ytrain)
        values = selector.estimator_.feature_importances_
    elif problemType == 'regression':
        from sklearn.feature_selection import SelectFromModel
        from sklearn.linear_model import Lasso
        selector = SelectFromModel(Lasso())
        selector.fit(xtrain, ytrain)
        values = np.abs(selector.estimator_.coef_)
    labels = xtrain.columns.tolist()
    dft = pd.DataFrame()
    dft['labels'] = labels
    dft['values'] = values
    maxrecord = dft.iloc[dft['values'].argmax()]
    mostimportantfeature = maxrecord['labels']
    f_imp = dft.to_json(orient='records')
    return f_imp, mostimportantfeature

def get_trust_score(prdictfn, proba_fun, X_train, y_train):
    from alibi.confidence import TrustScore
    ts = TrustScore(k_filter=10, alpha=.05, filter_type='distance_knn', leaf_size=40, metric='euclidean', dist_filter_type='point')
    ts.fit(X_train, y_train, classes=3)
    y_pred = prdictfn(X_train)
    score, closest_class = ts.score(X_train, y_pred, k=2, dist_type='point')
    return mean(score)

def getCounterFactuals(model, prdictfn, features, x_train, categories):
    from alibi.explainers import CounterFactualProto
    cat_vars_ord = {}
    categoryList = categories.keys().tolist()
    categoryCountList = categories.tolist()
    for i in range(0, len(categoryCountList)):
        cat_vars_ord[categoryList[i]] = categoryCountList[i]
    print(cat_vars_ord)
    X = x_train[0].reshape((1,) + x_train[0].shape)
    shape = X.shape
    print(shape)
    beta = .01
    c_init = 1.
    c_steps = 5
    max_iterations = 500
    rng = (-1., 1.)  # scale features between -1 and 1
    feature_range = (x_train.min(axis=0), x_train.max(axis=0))
    cf = CounterFactualProto(prdictfn, shape, cat_vars=cat_vars_ord)
    explanation = cf.explain(X)
    print(explanation)

def getAnchorTabularofFirstRecord(predict_fn, features, X_train, X_test, labelMap, class_percent=None):
    threshold = 0.95
    from alibi.explainers import AnchorTabular
    explainer = AnchorTabular(predict_fn, features)
    explainer.fit(X_train.values)
    X_test = X_test.values
    anchors = []
    for idx in range(len(X_test)):
        prediction = explainer.predictor(X_test[idx].reshape(1, -1))[0]
        if len(labelMap) > 0:
            predictionstr = list(labelMap.keys())[list(labelMap.values()).index(prediction)]
        else:
            predictionstr = prediction
        explanation = explainer.explain(X_test[idx], threshold=threshold)
        if str(explanation.anchor) == '[]':
            if class_percent and class_percent.get(prediction, 0.0) > threshold:
                anchor = f"Cannot explain the prediction of this class ({predictionstr}) since there is no salient subset of features that is necessary for the prediction to hold. This behaviour is typical when the data is very imbalanced and is seen for the majority class in a classification problem."
            else:
                anchor = f'Cannot get the explanation for {predictionstr}.'
            precision = explanation.precision[0]
        else:
            anchor = '%s' % (' AND '.join(explanation.anchor))
            precision = explanation.precision
        coverage = explanation.coverage
        anchorjson = {}
        anchorjson['features'] = eval(str(features))
        anchorjson['values'] = eval(str(list(X_test[idx])))
        anchorjson['prediction'] = str(predictionstr)
        anchorjson['precision'] = str(round(precision, 2))
        anchorjson['anchor'] = anchor
        anchors.append(anchorjson)
    print(anchors)
    try:
        return json.dumps(anchors)
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
        return json.dumps({})

def ale_analysis():
    displaypath = os.path.join(os.path.dirname(os.path.abspath(__file__)), "etc", "display.json")
    with open(displaypath) as file:
        config = json.load(file)
    model = joblib.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), "model", config['saved_model']))
    predict_fn = lambda x: model.predict(x)
    predictproba_fn = lambda x: model.predict_proba(x)
    dathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'postprocesseddata.csv.gz')
    dataFrame = pd.read_csv(dathPath, compression='gzip')
    testdathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'predicteddata.csv.gz')
    testdataFrame = pd.read_csv(testdathPath, compression='gzip')
    features = config['modelFeatures']          # e.g. ['SepalWidthCm', 'PetalLengthCm']
    targetfeature = config['targetFeature']     # e.g. 'Species'
    labelMap = config['labelMaps']
    targetData = dataFrame[targetfeature]
    if config['problemType'].lower() == 'regression':
        X_train = dataFrame[features]
        X_test = testdataFrame.head(5)
        X_test = X_test[features]
    else:
        valueCount = targetData.value_counts()
        class_percent = (valueCount / len(targetData)).to_dict()
        categoryList = valueCount.keys().tolist()
        class_names = categoryList
        X_train = dataFrame[features]
        X_test = testdataFrame.groupby('predict').first().reset_index()
        X_test = X_test[features]
    f_imp, m_imp_f = feature_importance(X_train, targetData, features, targetfeature, config['problemType'].lower())
    if hasattr(model, "decision_function"):
        logit_fun_lr = model.decision_function
        try:
            logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=categoryList)
            logit_exp_lr = logit_ale_lr.explain(X_train.values)
            values = logit_exp_lr.ale_values
            feature = logit_exp_lr.feature_names
            feature_values = logit_exp_lr.feature_values
            lines = []
            sentences = []
            for x in range(0, len(feature)):
                f_value = feature_values[x]
                value = values[x]
                lines = []
                for y in range(0, len(class_names)):
                    line = []
                    for z in value:
                        cordinate = z[y]
                        line.append(cordinate)
                    lines.append(line)
                # intersection of the first two class ALE lines locates the decision point
                line = lines[0]
                slope1, intercept1, r_value, p_value, std_err = linregress(f_value, line)
                line = lines[1]
                slope2, intercept2, r_value, p_value, std_err = linregress(f_value, line)
                xi = (intercept1 - intercept2) / (slope2 - slope1)
                xi = round(xi, 2)
                lastvalues = {}
                i = 0
                for line in lines:
                    value = line[len(line) - 1]
                    lastvalues[class_names[i]] = value
                    i = i + 1
                Keymax = max(lastvalues, key=lastvalues.get)
                Keymin = min(lastvalues, key=lastvalues.get)
                Keymaxclass = list(labelMap.keys())[list(labelMap.values()).index(Keymax)]
                Keyminclass = list(labelMap.keys())[list(labelMap.values()).index(Keymin)]
                sentense = '<b>Effect of ' + str(feature[x]) + '</b><br>For data samples having <b>' + str(feature[x]) + '</b> >= <b>~' + str(xi) + '</b>, there is a very high chance that they are of class <b>' + str(Keymaxclass) + '</b> ' + targetfeature + '. For data samples having <b>' + str(feature[x]) + '</b> < <b>~' + str(xi) + '</b>, there is a very high chance that they are of class <b>' + str(Keyminclass) + '</b> ' + targetfeature + '.'
                sentences.append(sentense)
        except:
            sentense = ''
            sentences.append(sentense)
            xi = 0
    elif hasattr(model, "predict_proba"):
        logit_fun_lr = model.predict_proba
        logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=categoryList)
        logit_exp_lr = logit_ale_lr.explain(X_train.values)
        values = logit_exp_lr.ale_values
        feature = logit_exp_lr.feature_names
        feature_values = logit_exp_lr.feature_values
        lines = []
        sentences = []
        sentense = 'The graphs show, for a given feature value, how much more (or less) probability the model assigns to each class relative to the mean prediction. Any increase in the relative probability of one class must therefore be matched by a decrease in the probability of another class.'
        sentences.append(sentense)
        xi = 0
    elif hasattr(model, "predict"):
        logit_fun_lr = model.predict
        logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=['Value'])
        logit_exp_lr = logit_ale_lr.explain(X_train.values)
        values = logit_exp_lr.ale_values
        feature = logit_exp_lr.feature_names
        feature_values = logit_exp_lr.feature_values
        lines = []
        sentences = []
        sentense = 'The ALE value at a feature value is the difference from the mean effect of that feature. Put differently, the ALE value is the relative effect of the feature on the prediction at that feature value.'
        sentences.append(sentense)
        xi = 0
    if len(features) % 2 == 0:
        n_cols = int(len(features) / 2)
    else:
        n_cols = int(len(features) / 2) + 1
    figheight = n_cols * 3
    try:
        plot_ale(logit_exp_lr, n_cols=2, fig_kw={'figwidth': 8, 'figheight': figheight})
        plt.tight_layout(pad=0)
        image = io.BytesIO()
        plt.savefig(image, format='png')
        image.seek(0)
        string = base64.b64encode(image.read())
        image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
    except:
        image_64 = ''
    if config['problemType'].lower() == 'classification':
        anchorjson = getAnchorTabularofFirstRecord(predict_fn, features, X_train, X_test, labelMap, class_percent)
    else:
        anchorjson = getAnchorTabularofFirstRecord(predict_fn, features, X_train, X_test, labelMap)
    # optional extras, currently disabled:
    # score = get_trust_score(model.predict, predictproba_fn, X_train.values, targetData.values)
    # getCounterFactuals(model, predictproba_fn, features, X_train.values, valueCount)
    output_json = {"status": "SUCCESS", "data": {"data": image_64, "most_influencedfeature": m_imp_f, "interceptionpoint": xi, "sentences": sentences, "feature_importance": json.loads(f_imp), "anchorjson": json.loads(anchorjson)}}
    output_json = json.dumps(output_json)
    print("aion_ai_explanation:", output_json)
    return output_json

def local_analysis(jsonData):
    jsonData = json.loads(jsonData)
    displaypath = os.path.join(os.path.dirname(os.path.abspath(__file__)), "etc", "display.json")
    with open(displaypath) as file:
        config = json.load(file)
    model = joblib.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), "model", config['saved_model']))
    predict_fn = lambda x: model.predict(x)
    dathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'postprocesseddata.csv.gz')
    dataFrame = pd.read_csv(dathPath, compression='gzip')
    testdathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'predicteddata.csv.gz')
    testdataFrame = pd.read_csv(testdathPath, compression='gzip')
    features = config['modelFeatures']
    targetfeature = config['targetFeature']
    targetData = dataFrame[targetfeature]
    valueCount = targetData.value_counts()
    categoryList = valueCount.keys().tolist()
    class_names = categoryList
    X_train = dataFrame[features]
    from pandas import json_normalize
    df_test = json_normalize(jsonData)
    df_test = preprocessing(df_test)
    df_test = df_test[features]
    from alibi.explainers import AnchorTabular
    explainer = AnchorTabular(predict_fn, features)
    explainer.fit(X_train.values)
    df_test = df_test.values
    prediction = explainer.predictor(df_test.reshape(1, -1))[0]
    labelMap = config['labelMaps']
    if len(labelMap) > 0:
        prediction = list(labelMap.keys())[list(labelMap.values()).index(prediction)]
    else:
        prediction = str(prediction)
    try:
        explanation = explainer.explain(df_test, threshold=0.85)
        if str(explanation.anchor) == '[]':
            anchor = 'NA'
            precision = str(round(explanation.precision[0], 2))
        else:
            anchor = '%s' % (' AND '.join(explanation.anchor))
            precision = str(round(explanation.precision, 2))
        coverage = explanation.coverage
    except Exception as e:
        print(e)
        anchor = 'NA'
        precision = 0
        coverage = 0
    df_test_waterfall = testdataFrame
    forceplot, multidecisionplot, waterfallplot = feature_importance_using_shap(model, X_train.head(300).values, features, class_names, df_test, df_test_waterfall)
    output_json = {"status": "SUCCESS", "data": {"anchor": anchor, "precision": precision, "coverage": coverage, "prediction": prediction, "forceplot": forceplot, "multidecisionplot": multidecisionplot, "waterfallplot": waterfallplot}}
    output_json = json.dumps(output_json)
    print("aion_ai_explanation:", output_json)
    return output_json

if __name__ == '__main__':
    analysis_type = sys.argv[1]
    if analysis_type.lower() == 'global':
        ale_analysis()
    if analysis_type.lower() == 'local':
        data = sys.argv[2]
        local_analysis(data)
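## Consumer-side sketch: the plots in the JSON output above are percent-encoded
## base64 PNG data URIs; they can be decoded back to image files like this (the
## helper name and output path are illustrative):
def _save_data_uri_png(data_uri, out_path='plot.png'):
    import base64, urllib.parse
    b64 = urllib.parse.unquote(data_uri.split('base64,', 1)[1])  # strip the 'data:image/png;base64,' prefix
    with open(out_path, 'wb') as f:
        f.write(base64.b64decode(b64))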
explainabledl_ai.py
import matplotlib.pyplot as plt
try:
    from sklearn.externals import joblib
except:
    import joblib
import os, sys
import pandas as pd
from alibi.explainers import ALE, plot_ale
import io
import json
import urllib, base64
import numpy as np
from scipy.stats import linregress
from statistics import mean
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
import tensorflow as tf
from script.inputprofiler import inputprofiler

tf.compat.v1.disable_eager_execution()

def preprocessing(data):
    profilerobj = inputprofiler()
    data = profilerobj.run(data)
    data = data.astype(np.float64)
    return data

## Custom Keras metrics; models saved while tracking one of these must be
## reloaded with a matching custom_objects mapping (see ale_analysis below)
def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall

def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision

def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))

def rmse_m(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))

def r_square(y_true, y_pred):
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - SS_res / (SS_tot + K.epsilon())

def get_ranked_values(explanation):
    ranked_shap_vals = []
    for cls_idx in range(len(explanation.shap_values)):
        this_ranking = (
            explanation.raw['importances'][str(cls_idx)]['ranked_effect'],
            explanation.raw['importances'][str(cls_idx)]['names']
        )
        ranked_shap_vals.append(this_ranking)
    return ranked_shap_vals

def feature_importance_using_shap(model, X, featuresNames, classes, x_test, problemType, modelname, x_test_waterfall):
    from alibi.explainers import KernelShap
    import shap
    shap.initjs()
    # models other than 'Neural Network' expect a third (channel) axis on their input
    if hasattr(model, "decision_function") and problemType.lower() == 'classification':
        pred_fcn = model.decision_function
    elif hasattr(model, "predict_proba") and problemType.lower() == 'classification':
        pred_fcn = lambda x: model.predict_proba(np.expand_dims(x, axis=2))
    else:
        if modelname == 'Neural Network':
            pred_fcn = lambda x: model.predict(x)
        else:
            pred_fcn = lambda x: model.predict(np.expand_dims(x, axis=2))
    svm_explainer = KernelShap(pred_fcn, feature_names=featuresNames)
    xtest = x_test[0].reshape(1, -1)
    svm_explainer.fit(X, n_background_samples=100)
    svm_explanation = svm_explainer.explain(xtest)
    try:
        idx = 0
        instance = x_test[0][None, :]
        if problemType.lower() == 'classification':
            if modelname == 'Neural Network':
                instance = x_test
            else:
                instance = np.expand_dims(x_test, axis=2)
            pred = np.argmax(model.predict(instance), axis=1)
            class_idx = pred.item()
        else:
            instance = np.expand_dims(x_test, axis=2)
            pred = model.predict(instance)
            class_idx = 0
        if isinstance(svm_explainer.expected_value, np.ndarray):
            forceplot = shap.force_plot(svm_explainer.expected_value[class_idx], svm_explanation.shap_values[class_idx][idx, :], instance, feature_names=featuresNames, matplotlib=True, show=False)
        else:
            forceplot = shap.force_plot(svm_explainer.expected_value, svm_explanation.shap_values[0][idx, :], instance, feature_names=featuresNames, matplotlib=True, show=False)
        plt.tight_layout(pad=0)
        image = io.BytesIO()
        plt.savefig(image, format='png')
        image.seek(0)
        string = base64.b64encode(image.read())
        image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
    except Exception as inst:
        print(inst)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
        image_64 = ''
    try:
        plt.clf()
        if isinstance(svm_explainer.expected_value, np.ndarray):
            r = shap.multioutput_decision_plot(svm_explainer.expected_value.tolist(), svm_explanation.shap_values, idx, feature_names=featuresNames, feature_order='importance', highlight=[class_idx], legend_labels=classes, return_objects=True, legend_location='lower right', show=False)
        else:
            expectedvalues = [svm_explainer.expected_value]
            r = shap.multioutput_decision_plot(expectedvalues, svm_explanation.shap_values, idx, feature_names=featuresNames, highlight=[0], return_objects=True, legend_labels=['Value'], feature_order='importance', show=False)
        plt.tight_layout(pad=0)
        image = io.BytesIO()
        plt.savefig(image, format='png')
        image.seek(0)
        string = base64.b64encode(image.read())
        image2_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
    except Exception as inst:
        print(inst)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
        image2_64 = ''
    try:
        plt.clf()
        explainer = shap.DeepExplainer(model, X)
        shap_values = explainer.shap_values(x_test_waterfall.values)
        exp = shap.plots._waterfall.waterfall_legacy(explainer.expected_value[0].numpy(), shap_values[0][0], feature_names=featuresNames, show=False)
        image = io.BytesIO()
        plt.savefig(image, format='png', bbox_inches='tight')
        image.seek(0)
        string = base64.b64encode(image.read())
        image3_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
    except Exception as inst:
        print(inst)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
        image3_64 = ''
    return (image_64, image2_64, image3_64)

def feature_importance(xtrain, ytrain, xfeatures, yfeature, problemType):
    if problemType == 'classification':
        from sklearn.feature_selection import SelectFromModel
        from sklearn.ensemble import ExtraTreesClassifier
        selector = SelectFromModel(ExtraTreesClassifier())
        selector.fit(xtrain, ytrain)
        values = selector.estimator_.feature_importances_
    elif problemType == 'regression':
        from sklearn.feature_selection import SelectFromModel
        from sklearn.linear_model import Lasso
        selector = SelectFromModel(Lasso())
        selector.fit(xtrain, ytrain)
        values = np.abs(selector.estimator_.coef_)
    labels = xtrain.columns.tolist()
    dft = pd.DataFrame()
    dft['labels'] = labels
    dft['values'] = values
    maxrecord = dft.iloc[dft['values'].argmax()]
    mostimportantfeature = maxrecord['labels']
    f_imp = dft.to_json(orient='records')
    return f_imp, mostimportantfeature

def get_trust_score(prdictfn, proba_fun, X_train, y_train):
    from alibi.confidence import TrustScore
    ts = TrustScore(k_filter=10, alpha=.05, filter_type='distance_knn', leaf_size=40, metric='euclidean', dist_filter_type='point')
    ts.fit(X_train, y_train, classes=3)
    y_pred = prdictfn(X_train)
    score, closest_class = ts.score(X_train, y_pred, k=2, dist_type='point')
    return mean(score)

def getCounterFactuals(model, prdictfn, features, x_train, categories):
    from alibi.explainers import CounterFactualProto
    cat_vars_ord = {}
    categoryList = categories.keys().tolist()
    categoryCountList = categories.tolist()
    for i in range(0, len(categoryCountList)):
        cat_vars_ord[categoryList[i]] = categoryCountList[i]
    print(cat_vars_ord)
    X = x_train[0].reshape((1,) + x_train[0].shape)
    shape = X.shape
    print(shape)
    beta = .01
    c_init = 1.
    c_steps = 5
    max_iterations = 500
    rng = (-1., 1.)  # scale features between -1 and 1
    feature_range = (x_train.min(axis=0), x_train.max(axis=0))
    cf = CounterFactualProto(prdictfn, shape, cat_vars=cat_vars_ord)
    explanation = cf.explain(X)
    print(explanation)

def getAnchorTabularofFirstRecord(predict_fn, features, X_train, X_test, labelMap, class_percent=None):
    threshold = 0.95
    from alibi.explainers import AnchorTabular
    explainer = AnchorTabular(predict_fn, features)
    explainer.fit(X_train.values)
    X_test = X_test.values
    anchors = []
    for idx in range(len(X_test)):
        prediction = explainer.predictor(X_test[idx].reshape(1, -1))[0]
        if isinstance(prediction, np.ndarray):
            prediction = prediction[0]
        if len(labelMap) > 0:
            predictionstr = list(labelMap.keys())[list(labelMap.values()).index(prediction)]
        else:
            predictionstr = str(prediction)
        try:
            explanation = explainer.explain(X_test[idx], threshold=threshold)
            if str(explanation.anchor) == '[]':
                if class_percent and class_percent.get(prediction, 0.0) > threshold:
                    anchor = f"Cannot explain the prediction of this class ({predictionstr}) since there is no salient subset of features that is necessary for the prediction to hold. This behaviour is typical when the data is very imbalanced and is seen for the majority class in a classification problem."
                else:
                    anchor = f'Cannot get the explanation for {predictionstr}.'
                precision = explanation.precision[0]
            else:
                anchor = '%s' % (' AND '.join(explanation.anchor))
                precision = explanation.precision
            coverage = explanation.coverage
        except Exception as inst:
            print(inst)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
            anchor = 'Reason Not found'
            precision = 0
        anchorjson = {}
        anchorjson['features'] = eval(str(features))
        anchorjson['values'] = eval(str(list(X_test[idx])))
        anchorjson['prediction'] = predictionstr
        anchorjson['precision'] = precision
        anchorjson['anchor'] = anchor
        anchors.append(anchorjson)
    return json.dumps(anchors)

def ale_analysis():
    displaypath = os.path.join(os.path.dirname(os.path.abspath(__file__)), "etc", "display.json")
    with open(displaypath) as file:
        config = json.load(file)
    scoreParam = config['scoreParam']
    filename_dl = os.path.join(os.path.dirname(os.path.abspath(__file__)), "model", config['saved_model'])
    # models saved with a custom metric must be reloaded with the matching custom_objects
    if scoreParam.lower() == 'rmse':
        model = load_model(filename_dl, custom_objects={"rmse": rmse_m}, compile=False)
        model.compile(loss='binary_crossentropy', optimizer='Nadam', metrics=[rmse_m])
    elif scoreParam.lower() == 'r2':
        model = load_model(filename_dl, custom_objects={"r2": r_square}, compile=False)
        model.compile(loss='binary_crossentropy', optimizer='Nadam', metrics=[r_square])
    elif scoreParam.lower() == 'recall':
        model = load_model(filename_dl, custom_objects={"recall": recall_m}, compile=False)
        model.compile(loss='binary_crossentropy', optimizer='Nadam', metrics=[recall_m])
    elif scoreParam.lower() == 'precision':
        model = load_model(filename_dl, custom_objects={"precision": precision_m}, compile=False)
        model.compile(loss='binary_crossentropy', optimizer='Nadam', metrics=[precision_m])
    elif scoreParam.lower() == 'roc_auc':
        model = load_model(filename_dl, compile=False)
        model.compile(loss='binary_crossentropy', optimizer='Nadam', metrics=[tf.keras.metrics.AUC()])
    elif scoreParam.lower() == 'f1_score':
        model = load_model(filename_dl, custom_objects={"f1_score": f1_m}, compile=False)
        model.compile(loss='binary_crossentropy', optimizer='Nadam', metrics=[f1_m])
    else:
        model = load_model(filename_dl)
    if config['modelname'] == 'Neural Network':
        predict_fn = lambda x: model.predict(x)
    else:
        predict_fn = lambda x: model.predict(np.expand_dims(x, axis=2))
    predictproba_fn = lambda x: model.predict_proba(np.expand_dims(x, axis=2))
    dathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'postprocesseddata.csv.gz')
    dataFrame = pd.read_csv(dathPath, compression='gzip')
    testdathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'predicteddata.csv.gz')
    testdataFrame = pd.read_csv(testdathPath, compression='gzip')
    features = config['modelFeatures']          # e.g. ['SepalWidthCm', 'PetalLengthCm']
    targetfeature = config['targetFeature']     # e.g. 'Species'
    labelMap = config['labelMaps']
    targetData = dataFrame[targetfeature]
    if config['problemType'].lower() == 'regression':
        X_train = dataFrame[features]
        X_test = testdataFrame.head(5)
        X_test = X_test[features]
    else:
        valueCount = targetData.value_counts()
        class_percent = (valueCount / len(targetData)).to_dict()
        categoryList = valueCount.keys().tolist()
        class_names = categoryList
        X_train = dataFrame[features]
        X_test = testdataFrame.groupby('predict').first().reset_index()
        X_test = X_test[features]
    f_imp, m_imp_f = feature_importance(X_train, targetData, features, targetfeature, config['problemType'].lower())
    if hasattr(model, "decision_function") and config['problemType'].lower() == 'classification':
        logit_fun_lr = model.decision_function
        try:
            logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=categoryList)
            logit_exp_lr = logit_ale_lr.explain(X_train.values)
            values = logit_exp_lr.ale_values
            feature = logit_exp_lr.feature_names
            feature_values = logit_exp_lr.feature_values
            lines = []
            sentences = []
            for x in range(0, len(feature)):
                f_value = feature_values[x]
                value = values[x]
                lines = []
                for y in range(0, len(class_names)):
                    line = []
                    for z in value:
                        cordinate = z[y]
                        line.append(cordinate)
                    lines.append(line)
                # intersection of the first two class ALE lines locates the decision point
                line = lines[0]
                slope1, intercept1, r_value, p_value, std_err = linregress(f_value, line)
                line = lines[1]
                slope2, intercept2, r_value, p_value, std_err = linregress(f_value, line)
                xi = (intercept1 - intercept2) / (slope2 - slope1)
                xi = round(xi, 2)
                lastvalues = {}
                i = 0
                for line in lines:
                    value = line[len(line) - 1]
                    lastvalues[class_names[i]] = value
                    i = i + 1
                Keymax = max(lastvalues, key=lastvalues.get)
                Keymin = min(lastvalues, key=lastvalues.get)
                Keymaxclass = list(labelMap.keys())[list(labelMap.values()).index(Keymax)]
                Keyminclass = list(labelMap.keys())[list(labelMap.values()).index(Keymin)]
                sentense = '<b>Effect of ' + str(feature[x]) + '</b><br>For data samples having <b>' + str(feature[x]) + '</b> >= <b>~' + str(xi) + '</b>, there is a very high chance that they are of class <b>' + str(Keymaxclass) + '</b> ' + targetfeature + '. For data samples having <b>' + str(feature[x]) + '</b> < <b>~' + str(xi) + '</b>, there is a very high chance that they are of class <b>' + str(Keyminclass) + '</b> ' + targetfeature + '.'
                sentences.append(sentense)
        except:
            sentense = ''
            sentences.append(sentense)
            xi = 0
    elif hasattr(model, "predict_proba") and config['problemType'].lower() == 'classification':
        logit_fun_lr = lambda x: model.predict_proba(np.expand_dims(x, axis=2))
        logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=categoryList)
        print(model.__class__)
        try:
            logit_exp_lr = logit_ale_lr.explain(X_train.values)
        except:
            X = np.expand_dims(X_train, axis=2)
            logit_exp_lr = logit_ale_lr.explain(X)
        values = logit_exp_lr.ale_values
        feature = logit_exp_lr.feature_names
        feature_values = logit_exp_lr.feature_values
        lines = []
        sentences = []
        sentense = 'The graphs show, for a given feature value, how much more (or less) probability the model assigns to each class relative to the mean prediction. Any increase in the relative probability of one class must therefore be matched by a decrease in the probability of another class.'
        sentences.append(sentense)
        xi = 0
    elif hasattr(model, "predict"):
        try:
            if config['modelname'] == 'Neural Network':
                logit_fun_lr = lambda x: model.predict(x)
            else:
                logit_fun_lr = lambda x: model.predict(np.expand_dims(x, axis=2))
            logit_ale_lr = ALE(logit_fun_lr, feature_names=features, target_names=['Value'])
            logit_exp_lr = logit_ale_lr.explain(X_train.values)
            values = logit_exp_lr.ale_values
            feature = logit_exp_lr.feature_names
            feature_values = logit_exp_lr.feature_values
            lines = []
            sentences = []
            sentense = 'The ALE value at a feature value is the difference from the mean effect of that feature. Put differently, the ALE value is the relative effect of the feature on the prediction at that feature value.'
            sentences.append(sentense)
            xi = 0
        except:
            xi = 0
            sentences = []
    if len(features) % 2 == 0:
        n_cols = int(len(features) / 2)
    else:
        n_cols = int(len(features) / 2) + 1
    figheight = n_cols * 3
    try:
        plot_ale(logit_exp_lr, n_cols=2, fig_kw={'figwidth': 8, 'figheight': figheight})
        plt.tight_layout(pad=0)
        image = io.BytesIO()
        plt.savefig(image, format='png')
        image.seek(0)
        string = base64.b64encode(image.read())
        image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
    except:
        image_64 = ''
    if config['problemType'].lower() == 'classification':
        anchorjson = getAnchorTabularofFirstRecord(predict_fn, features, X_train, X_test, labelMap, class_percent)
    else:
        anchorjson = getAnchorTabularofFirstRecord(predict_fn, features, X_train, X_test, labelMap)
    # optional extras, currently disabled:
    # score = get_trust_score(model.predict, predictproba_fn, X_train.values, targetData.values)
    # getCounterFactuals(model, predictproba_fn, features, X_train.values, valueCount)
    output_json = {"status": "SUCCESS", "data": {"data": image_64, "most_influencedfeature": m_imp_f, "interceptionpoint": xi, "sentences": sentences, "feature_importance": json.loads(f_imp), "anchorjson": json.loads(anchorjson)}}
    output_json = json.dumps(output_json)
    print("aion_ai_explanation:", output_json)
    return output_json

def local_analysis(jsonData):
    jsonData = json.loads(jsonData)
    displaypath = os.path.join(os.path.dirname(os.path.abspath(__file__)), "etc", "display.json")
    with open(displaypath) as file:
        config = json.load(file)
    scoreParam = config['scoreParam']
    filename_dl = os.path.join(os.path.dirname(os.path.abspath(__file__)), "model", config['saved_model'])
    # same custom_objects dispatch as in ale_analysis above
    if scoreParam.lower() == 'rmse':
        model = load_model(filename_dl, custom_objects={"rmse": rmse_m}, compile=False)
        model.compile(loss='binary_crossentropy', optimizer='Nadam', metrics=[rmse_m])
    elif scoreParam.lower() == 'r2':
        model = load_model(filename_dl, custom_objects={"r2": r_square}, compile=False)
        model.compile(loss='binary_crossentropy', optimizer='Nadam', metrics=[r_square])
    elif scoreParam.lower() == 'recall':
        model = load_model(filename_dl, custom_objects={"recall": recall_m}, compile=False)
        model.compile(loss='binary_crossentropy', optimizer='Nadam', metrics=[recall_m])
    elif scoreParam.lower() == 'precision':
        model = load_model(filename_dl, custom_objects={"precision": precision_m}, compile=False)
        model.compile(loss='binary_crossentropy', optimizer='Nadam', metrics=[precision_m])
    elif scoreParam.lower() == 'roc_auc':
        model = load_model(filename_dl, compile=False)
        model.compile(loss='binary_crossentropy', optimizer='Nadam', metrics=[tf.keras.metrics.AUC()])
    elif scoreParam.lower() == 'f1_score':
        model = load_model(filename_dl, custom_objects={"f1_score": f1_m}, compile=False)
        model.compile(loss='binary_crossentropy', optimizer='Nadam', metrics=[f1_m])
    else:
        model = load_model(filename_dl)
    if config['modelname'] == 'Neural Network':
        predict_fn = lambda x: model.predict(x)
    else:
        predict_fn = lambda x: model.predict(np.expand_dims(x, axis=2))
    dathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'postprocesseddata.csv.gz')
    dataFrame = pd.read_csv(dathPath, compression='gzip')
    testdathPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'predicteddata.csv.gz')
    testdataFrame = pd.read_csv(testdathPath, compression='gzip')
    features = config['modelFeatures']
    targetfeature = config['targetFeature']
    targetData = dataFrame[targetfeature]
    valueCount = targetData.value_counts()
    categoryList = valueCount.keys().tolist()
    class_names = categoryList
    X_train = dataFrame[features]
    from pandas import json_normalize
    df_test = json_normalize(jsonData)
    df_test = preprocessing(df_test)
    df_test = df_test[features]
    from alibi.explainers import AnchorTabular
    explainer = AnchorTabular(predict_fn, features)
    explainer.fit(X_train.values)
    df_test = df_test.values
    prediction = explainer.predictor(df_test.reshape(1, -1))[0]
    if isinstance(prediction, np.ndarray):
        prediction = prediction[0]
    labelMap = config['labelMaps']
    if len(labelMap) > 0:
        prediction = list(labelMap.keys())[list(labelMap.values()).index(prediction)]
    else:
        prediction = str(prediction)
    try:
        explanation = explainer.explain(df_test.reshape(1, -1), threshold=0.85)
        if str(explanation.anchor) == '[]':
            anchor = 'NA'
            precision = explanation.precision[0]
        else:
            anchor = '%s' % (' AND '.join(explanation.anchor))
            precision = explanation.precision
        coverage = explanation.coverage
    except Exception as inst:
        print(inst)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
        print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
        anchor = 'NA'
        precision = 0
        coverage = 0
    df_test_waterfall = testdataFrame
    forceplot, multidecisionplot, waterfallplot = feature_importance_using_shap(model, X_train.head(300).values, features, class_names, df_test, config['problemType'], config['modelname'], df_test_waterfall)
    output_json = {"status": "SUCCESS", "data": {"anchor": anchor, "precision": precision, "coverage": coverage, "prediction": prediction, "forceplot": forceplot, "multidecisionplot": multidecisionplot, "waterfallplot": waterfallplot}}
    print(output_json)
    output_json = json.dumps(output_json)
    print("aion_ai_explanation:", output_json)
    return output_json

if __name__ == '__main__':
    analysis_type = sys.argv[1]
    if analysis_type.lower() == 'global':
        ale_analysis()
    if analysis_type.lower() == 'local':
        data = sys.argv[2]
        local_analysis(data)
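## Reload-pattern sketch: a Keras model saved while tracking one of the custom
## metrics above must be reloaded with a matching custom_objects mapping, which is
## what the scoreParam dispatch in ale_analysis()/local_analysis() does. Condensed
## form (the path argument is a placeholder):
def _example_reload(path, scoreParam):
    from tensorflow.keras.models import load_model
    custom = {'rmse': rmse_m, 'r2': r_square, 'recall': recall_m,
              'precision': precision_m, 'f1_score': f1_m}
    name = scoreParam.lower()
    if name in custom:
        return load_model(path, custom_objects={name: custom[name]}, compile=False)
    return load_model(path)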
__init__.py
'''
 *
 * =============================================================================
 * COPYRIGHT NOTICE
 * =============================================================================
 * @ Copyright HCL Technologies Ltd. 2021, 2022,2023
 * Proprietary and confidential. All information contained herein is, and
 * remains the property of HCL Technologies Limited. Copying or reproducing the
 * contents of this file, via any medium is strictly prohibited unless prior
 * written permission is obtained from HCL Technologies Limited.
 *
'''
dataProfiler.py
'''
 *
 * =============================================================================
 * COPYRIGHT NOTICE
 * =============================================================================
 * @ Copyright HCL Technologies Ltd. 2021, 2022,2023
 * Proprietary and confidential. All information contained herein is, and
 * remains the property of HCL Technologies Limited. Copying or reproducing the
 * contents of this file, via any medium is strictly prohibited unless prior
 * written permission is obtained from HCL Technologies Limited.
 *
'''
import io
import json
import logging
import pandas as pd
import sys
import numpy as np
from pathlib import Path
from word2number import w2n
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer, KNNImputer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.compose import ColumnTransformer
from sklearn.base import TransformerMixin
from sklearn.ensemble import IsolationForest
from category_encoders import TargetEncoder
import scipy
try:
    import transformations.data_profiler_functions as cs
except:
    import data_profiler_functions as cs

if 'AION' in sys.modules:
    try:
        from appbe.app_config import DEBUG_ENABLED
    except:
        DEBUG_ENABLED = False
else:
    DEBUG_ENABLED = False

log_suffix = f'[{Path(__file__).stem}] '

class profiler():
    def __init__(self, xtrain, ytrain=None, target=None, encode_target=False, config={}, keep_unprocessed=[], data_path=None, log=None):
        if not isinstance(xtrain, pd.DataFrame):
            raise ValueError(f'{log_suffix}supported data type is pandas.DataFrame but provided data is of {type(xtrain)} type')
        if xtrain.empty:
            raise ValueError(f'{log_suffix}Data frame is empty')
        if target and target in xtrain.columns:
            self.target = xtrain[target]
            xtrain.drop(target, axis=1, inplace=True)
            self.target_name = target
        elif ytrain:
            self.target = ytrain
            self.target_name = 'target'
        else:
            self.target = pd.Series()
            self.target_name = None
        self.data_path = data_path
        self.encode_target = encode_target
        self.label_encoder = None
        self.data = xtrain
        self.keep_unprocessed = keep_unprocessed
        self.colm_type = {}
        for colm, infer_type in zip(self.data.columns, self.data.dtypes):
            self.colm_type[colm] = infer_type
        self.numeric_feature = []
        self.cat_feature = []
        self.text_feature = []
        self.wordToNumericFeatures = []
        self.added_features = []
        self.pipeline = []
        self.dropped_features = {}
        self.train_features_type = {}
        self.__update_type()
        self.config = config
        self.featureDict = config.get('featureDict', [])
        self.output_columns = []
        self.feature_expender = []
        self.text_to_num = {}
        self.force_numeric_conv = []
        if log:
            self.log = log
        else:
            self.log = logging.getLogger('eion')
        self.type_conversion = {}

    def log_dataframe(self, msg=None):
        buffer = io.StringIO()
        self.data.info(buf=buffer)
        if msg:
            log_text = f'Data frame after {msg}:'
        else:
            log_text = 'Data frame:'
        log_text += '\n\t' + str(self.data.head(2)).replace('\n', '\n\t')
        log_text += ('\n\t' + buffer.getvalue().replace('\n', '\n\t'))
        self.log.info(log_text)

    def transform(self):
        if self.is_target_available():
            if self.target_name:
                self.log.info(f"Target feature name: '{self.target_name}'")
            self.log.info(f"Target feature size: {len(self.target)}")
        else:
            self.log.info(f"Target feature not present")
        self.log_dataframe()
        print(self.data.info())
        try:
            self.process()
        except Exception as e:
            self.log.error(e, exc_info=True)
            raise
        pipe = FeatureUnion(self.pipeline)
        try:
            if self.text_feature:
                from text.textProfiler import set_pretrained_model
                set_pretrained_model(pipe)
            conversion_method = self.get_conversion_method()
            process_data = pipe.fit_transform(self.data, y=self.target)
            # save for testing
            if DEBUG_ENABLED:
                if isinstance(process_data, scipy.sparse.spmatrix):
                    process_data = process_data.toarray()
                df = pd.DataFrame(process_data)
                df.to_csv('debug_preprocessed.csv', index=False)
            if self.text_feature and conversion_method == 'latentsemanticanalysis':
                n_size = self.get_tf_idf_output_size(pipe)
                dimensions = self.get_tf_idf_dimensions()
                if n_size != dimensions:
                    dimensions = n_size
                from sklearn.decomposition import TruncatedSVD
                reducer = TruncatedSVD(n_components=dimensions)
                reduced_data = reducer.fit_transform(process_data[:, -n_size:])
                text_process_idx = [t[0] for t in pipe.transformer_list].index('text_process')
                pipe.transformer_list[text_process_idx][1].steps.append(('feature_reducer', reducer))
                if isinstance(process_data, scipy.sparse.spmatrix):
                    process_data = process_data.toarray()
                process_data = np.concatenate((process_data[:, :-n_size], reduced_data), axis=1)
                last_step = self.feature_expender.pop()
                self.feature_expender.append({'feature_reducer': list(last_step.values())[0]})
        except EOFError as e:
            if "Compressed file ended before the end-of-stream marker was reached" in str(e):
                raise EOFError('Pretrained model is not downloaded properly')
        self.update_output_features_names(pipe)
        if isinstance(process_data, scipy.sparse.spmatrix):
            process_data = process_data.toarray()
        df = pd.DataFrame(process_data, index=self.data.index, columns=self.output_columns)
        if self.is_target_available() and self.target_name:
            df[self.target_name] = self.target
        if self.keep_unprocessed:
            df[self.keep_unprocessed] = self.data[self.keep_unprocessed]
        self.log_numerical_fill()
        self.log_categorical_fill()
        self.log_normalization()
        return df, pipe, self.label_encoder

    def log_type_conversion(self):
        if self.log:
            self.log.info('----------- Inspecting Features -----------')
            self.log.info('----------- Type Conversion -----------')
            count = 0
            for k, v in self.type_conversion.items():
                if v[0] != v[1]:
                    self.log.info(f'-------> {k} -> from {v[0]} to {v[1]} : {v[2]}')
            self.log.info('Status:- |... Feature inspection done')

    def check_config(self):
        removeDuplicate = self.config.get('removeDuplicate', False)
        self.config['removeDuplicate'] = cs.get_boolean(removeDuplicate)
        self.config['misValueRatio'] = float(self.config.get('misValueRatio', cs.default_config['misValueRatio']))
        self.config['numericFeatureRatio'] = float(self.config.get('numericFeatureRatio', cs.default_config['numericFeatureRatio']))
        self.config['categoryMaxLabel'] = int(self.config.get('categoryMaxLabel', cs.default_config['categoryMaxLabel']))
        featureDict = self.config.get('featureDict', [])
        if isinstance(featureDict, dict):
            self.config['featureDict'] = []
        if isinstance(featureDict, str):
            self.config['featureDict'] = []

    def process(self):
        # removing duplicates is not required at prediction time
        self.check_config()
        self.remove_constant_feature()
        self.remove_empty_feature(self.config['misValueRatio'])
        self.remove_index_features()
        self.dropna()
        if self.config['removeDuplicate']:
            self.drop_duplicate()
        #self.check_categorical_features()
        #self.string_to_numeric()
        self.process_target()
        self.train_features_type = {k: v for k, v in zip(self.data.columns, self.data.dtypes)}
        self.parse_process_step_config()
        self.process_drop_fillna()
        self.log_type_conversion()
        self.update_num_fill_dict()
        if DEBUG_ENABLED:
            print(self.num_fill_method_dict)
        self.update_cat_fill_dict()
        self.create_pipeline()
        self.text_pipeline(self.config)
        self.apply_outlier()
        if DEBUG_ENABLED:
            self.log.info(self.process_method)
            self.log.info(self.pipeline)

    def is_target_available(self):
        return (isinstance(self.target, pd.Series) and not self.target.empty) or len(self.target)

    def process_target(self, operation='encode', arg=None):
        if self.is_target_available():
            # drop null values
            self.__update_index(self.target.notna(), 'target')
            if self.encode_target:
                self.label_encoder = LabelEncoder()
                self.target = self.label_encoder.fit_transform(self.target)
                return self.label_encoder
        return None

    def is_target_column(self, column):
        return column == self.target_name

    def fill_default_steps(self):
        num_fill_method = cs.get_one_true_option(self.config.get('numericalFillMethod', {}))
        normalization_method = cs.get_one_true_option(self.config.get('normalization', {}), 'none')
        for colm in self.numeric_feature:
            if num_fill_method:
                self.fill_missing_value_method(colm, num_fill_method.lower())
            if normalization_method:
                self.fill_normalizer_method(colm, normalization_method.lower())
        cat_fill_method = cs.get_one_true_option(self.config.get('categoricalFillMethod', {}))
        cat_encode_method = cs.get_one_true_option(self.config.get('categoryEncoding', {}))
        for colm in self.cat_feature:
            if cat_fill_method:
                self.fill_missing_value_method(colm, cat_fill_method.lower())
            if cat_encode_method:
                self.fill_encoder_value_method(colm, cat_encode_method.lower(), default=True)

    def parse_process_step_config(self):
        self.process_method = {}
        user_provided_data_type = {}
        for feat_conf in self.featureDict:
            colm = feat_conf.get('feature', '')
            if not self.is_target_column(colm):
                if colm in self.data.columns:
                    user_provided_data_type[colm] = feat_conf['type']
        if user_provided_data_type:
            self.update_user_provided_type(user_provided_data_type)
        self.fill_default_steps()
        for feat_conf in self.featureDict:
            colm = feat_conf.get('feature', '')
            if not self.is_target_column(colm):
                if colm in self.data.columns:
                    if feat_conf.get('fillMethod', None):
                        self.fill_missing_value_method(colm, feat_conf['fillMethod'].lower())
                    if feat_conf.get('categoryEncoding', None):
                        self.fill_encoder_value_method(colm, feat_conf['categoryEncoding'].lower())
                    if feat_conf.get('normalization', None):
                        self.fill_normalizer_method(colm, feat_conf['normalization'].lower())
                    if feat_conf.get('outlier', None):
                        self.fill_outlier_method(colm, feat_conf['outlier'].lower())
                    if feat_conf.get('outlierOperation', None):
                        self.fill_outlier_process(colm, feat_conf['outlierOperation'].lower())

    def get_tf_idf_dimensions(self):
        dim = cs.get_one_true_option(self.config.get('embeddingSize', {}).get('TF_IDF', {}), 'default')
        return {'default': 300, '50d': 50, '100d': 100, '200d': 200, '300d': 300}[dim]

    def get_tf_idf_output_size(self, pipe):
        start_index = {}
        for feat_expender in self.feature_expender:
            if feat_expender:
                step_name = list(feat_expender.keys())[0]
                index = list(feat_expender.values())[0]
                for transformer_step in pipe.transformer_list:
                    if transformer_step[1].steps[-1][0] in step_name:
                        start_index[index] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()}
        if start_index:
            for key, value in start_index.items():
                for k, v in value.items():
                    if k == 'vectorizer':
                        return len(v)
        return 0

    def update_output_features_names(self, pipe):
        columns = self.output_columns
        start_index = {}
        index_shifter = 0
        for feat_expender in self.feature_expender:
            if feat_expender:
                step_name = list(feat_expender.keys())[0]
                for key, value in start_index.items():
                    for k, v in value.items():
                        index_shifter += len(v)
                index = list(feat_expender.values())[0]
                for transformer_step in pipe.transformer_list:
                    if transformer_step[1].steps[-1][0] in step_name:
                        start_index[index + index_shifter] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names_out()}
        if start_index:
            for key, value in start_index.items():
                for k, v in value.items():
                    if k == 'vectorizer':
                        v = [f'{x}_vect' for x in v]
                    self.output_columns[key:key] = v
                    self.added_features = [*self.added_features, *v]

    def text_pipeline(self, conf_json):
        if self.text_feature:
            from text.textProfiler import textProfiler
            from text.textProfiler import textCombine
            pipeList = []
            text_pipe = Pipeline([
                ('selector', ColumnTransformer([
                    ("selector", "passthrough", self.text_feature)
                ], remainder="drop")),
                ("text_fillNa", SimpleImputer(strategy='constant', fill_value='')),
                ("merge_text_feature", textCombine())])
            obj = textProfiler()
            pipeList = obj.textProfiler(conf_json, pipeList, self.data_path)
            last_step = "merge_text_feature"
            for pipe_elem in pipeList:
                text_pipe.steps.append((pipe_elem[0], pipe_elem[1]))
                last_step = pipe_elem[0]
            text_transformer = ('text_process', text_pipe)
            self.pipeline.append(text_transformer)
            self.feature_expender.append({last_step: len(self.output_columns)})

    def create_pipeline(self):
        num_pipe = {}
        for k, v in self.num_fill_method_dict.items():
            for k1, v1 in v.items():
                if k1 and k1 != 'none':
                    num_pipe[f'{k}_{k1}'] = Pipeline([
                        ('selector', ColumnTransformer([
                            ("selector", "passthrough", v1)
                        ], remainder="drop")),
                        (k, self.get_num_imputer(k)),
                        (k1, self.get_num_scaler(k1))
                    ])
                else:
                    num_pipe[f'{k}_{k1}'] = Pipeline([
                        ('selector', ColumnTransformer([
                            ("selector", "passthrough", v1)
                        ], remainder="drop")),
                        (k, self.get_num_imputer(k))
                    ])
                self.output_columns.extend(v1)
        cat_pipe = {}
        for k, v in self.cat_fill_method_dict.items():
            for k1, v1 in v.items():
                cat_pipe[f'{k}_{k1}'] = Pipeline([
                    ('selector', ColumnTransformer([
                        ("selector", "passthrough", v1)
                    ], remainder="drop")),
                    (k, self.get_cat_imputer(k)),
                    (k1, self.get_cat_encoder(k1))
                ])
                if k1 not in ['onehotencoding']:
                    self.output_columns.extend(v1)
                else:
                    self.feature_expender.append({k1: len(self.output_columns)})
        for key,
pipe in num_pipe.items(): self.pipeline.append((key, pipe)) for key, pipe in cat_pipe.items(): self.pipeline.append((key, pipe)) "Drop: feature during training but replace with zero during prediction " def process_drop_fillna(self): drop_column = [] if 'numFill' in self.process_method.keys(): for col, method in self.process_method['numFill'].items(): if method == 'drop': self.process_method['numFill'][col] = 'zero' drop_column.append(col) if 'catFill' in self.process_method.keys(): for col, method in self.process_method['catFill'].items(): if method == 'drop': self.process_method['catFill'][col] = 'zero' drop_column.append(col) if drop_column: self.data.dropna(subset=drop_column, inplace=True) def update_num_fill_dict(self): self.num_fill_method_dict = {} if 'numFill' in self.process_method.keys(): for f in cs.supported_method['fillNa']['numeric']: self.num_fill_method_dict[f] = {} for en in cs.supported_method['normalization']: self.num_fill_method_dict[f][en] = [] for col in self.numeric_feature: numFillDict = self.process_method.get('numFill',{}) normalizationDict = self.process_method.get('normalization',{}) if f == numFillDict.get(col, '') and en == normalizationDict.get(col,''): self.num_fill_method_dict[f][en].append(col) if not self.num_fill_method_dict[f][en] : del self.num_fill_method_dict[f][en] if not self.num_fill_method_dict[f]: del self.num_fill_method_dict[f] def update_cat_fill_dict(self): self.cat_fill_method_dict = {} if 'catFill' in self.process_method.keys(): for f in cs.supported_method['fillNa']['categorical']: self.cat_fill_method_dict[f] = {} for en in cs.supported_method['categoryEncoding']: self.cat_fill_method_dict[f][en] = [] for col in self.cat_feature: catFillDict = self.process_method.get('catFill',{}) catEncoderDict = self.process_method.get('catEncoder',{}) if f == catFillDict.get(col, '') and en == catEncoderDict.get(col,''): self.cat_fill_method_dict[f][en].append(col) if not self.cat_fill_method_dict[f][en] : del self.cat_fill_method_dict[f][en] if not self.cat_fill_method_dict[f]: del self.cat_fill_method_dict[f] def __update_type(self): self.numeric_feature = list( set(self.data.select_dtypes(include='number').columns.tolist()) - set(self.keep_unprocessed)) self.cat_feature = list( set(self.data.select_dtypes(include='category').columns.tolist()) - set(self.keep_unprocessed)) self.text_feature = list( set(self.data.select_dtypes(include='object').columns.tolist()) - set(self.keep_unprocessed)) self.datetime_feature = list( set(self.data.select_dtypes(include='datetime').columns.tolist()) - set(self.keep_unprocessed)) def update_user_provided_type(self, data_types): allowed_types = ['numerical','categorical', 'text'] skipped_types = ['date','index'] type_mapping = {'numerical': np.dtype('float'), 'float': np.dtype('float'),'categorical': 'category', 'text':np.dtype('object'),'date':'datetime64[ns]','index': np.dtype('int64'),} mapped_type = {k:type_mapping[v] for k,v in data_types.items() if v in allowed_types} skipped_features = [k for k,v in data_types.items() if v in skipped_types] if skipped_features: self.keep_unprocessed.extend( skipped_features) self.keep_unprocessed = list(set(self.keep_unprocessed)) self.update_type(mapped_type, 'user provided data type') def get_type(self, as_list=False): if as_list: return [self.colm_type.values()] else: return self.colm_type def update_type(self, data_types={}, reason=''): invalid_features = [x for x in data_types.keys() if x not in self.data.columns] if invalid_features: valid_feat = 
list(set(data_types.keys()) - set(invalid_features)) valid_feat_type = {k:v for k,v in data_types.items() if k in valid_feat} else: valid_feat_type = data_types for k,v in valid_feat_type.items(): if v != self.colm_type[k].name: try: self.data.astype({k:v}) self.colm_type.update({k:self.data[k].dtype}) self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason) except: self.type_conversion[k] = (self.colm_type[k] , v, 'Fail', reason) if v == np.dtype('float64') and self.colm_type[k].name == 'object': if self.check_numeric( k): self.data[ k] = pd.to_numeric(self.data[ k], errors='coerce') self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason) self.force_numeric_conv.append( k) else: raise ValueError(f"Can not convert '{k}' feature to 'numeric' as numeric values are less than {self.config['numericFeatureRatio'] * 100}%") self.data = self.data.astype(valid_feat_type) self.__update_type() def check_numeric(self, feature): col_values = self.data[feature].copy() col_values = pd.to_numeric(col_values, errors='coerce') if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)): return True return False def string_to_numeric(self): def to_number(x): try: return w2n.word_to_num(x) except: return np.nan for col in self.text_feature: col_values = self.data[col].copy() col_values = pd.to_numeric(col_values, errors='coerce') if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)): self.text_to_num[col] = 'float64' self.wordToNumericFeatures.append(col) if self.text_to_num: columns = list(self.text_to_num.keys()) self.data[columns] = self.data[columns].apply(lambda x: to_number(x), axis=1, result_type='broadcast') self.update_type(self.text_to_num) self.log.info('----------- Inspecting Features -----------') for col in self.text_feature: self.log.info(f'-------> Feature : {col}') if col in self.text_to_num: self.log.info('----------> Numeric Status :Yes') self.log.info('----------> Data Type Converting to numeric :Yes') else: self.log.info('----------> Numeric Status :No') self.log.info(f'\nStatus:- |... Feature inspection done for numeric data: {len(self.text_to_num)} feature(s) converted to numeric') self.log.info(f'\nStatus:- |... 
Feature word to numeric treatment done: {self.text_to_num}') self.log.info('----------- Inspecting Features End -----------') def check_categorical_features(self): num_data = self.data.select_dtypes(include='number') num_data_unique = num_data.nunique() num_to_cat_col = {} for i, value in enumerate(num_data_unique): if value < self.config['categoryMaxLabel']: num_to_cat_col[num_data_unique.index[i]] = 'category' if num_to_cat_col: self.update_type(num_to_cat_col, 'numerical to categorical') str_to_cat_col = {} str_data = self.data.select_dtypes(include='object') str_data_unique = str_data.nunique() for i, value in enumerate(str_data_unique): if value < self.config['categoryMaxLabel']: str_to_cat_col[str_data_unique.index[i]] = 'category' for colm in str_data.columns: if self.data[colm].str.len().max() < cs.default_config['str_to_cat_len_max']: str_to_cat_col[colm] = 'category' if str_to_cat_col: self.update_type(str_to_cat_col, 'text to categorical') def drop_features(self, features=[], reason='unspecified'): if isinstance(features, str): features = [features] feat_to_remove = [x for x in features if x in self.data.columns] if feat_to_remove: self.data.drop(feat_to_remove, axis=1, inplace=True) for feat in feat_to_remove: self.dropped_features[feat] = reason self.log_drop_feature(feat_to_remove, reason) self.__update_type() def __update_index(self, indices, reason=''): if isinstance(indices, (bool, pd.core.series.Series)) and len(indices) == len(self.data): if not indices.all(): self.data = self.data[indices] if self.is_target_available(): self.target = self.target[indices] self.log_update_index((indices == False).sum(), reason) def dropna(self): self.data.dropna(how='all',inplace=True) if self.is_target_available(): self.target = self.target[self.data.index] def drop_duplicate(self): index = self.data.duplicated(keep='first') self.__update_index( ~index, reason='duplicate') def log_drop_feature(self, columns, reason): self.log.info(f'---------- Dropping {reason} features ----------') self.log.info(f'\nStatus:- |... {reason} feature treatment done: {len(columns)} {reason} feature(s) found') self.log.info(f'-------> Drop Features: {columns}') self.log.info(f'Data Frame Shape After Dropping (Rows,Columns): {self.data.shape}') def log_update_index(self,count, reason): if count: if reason == 'target': self.log.info('-------> Null Target Rows Drop:') self.log.info(f'-------> Dropped rows count: {count}') elif reason == 'duplicate': self.log.info('-------> Duplicate Rows Drop:') self.log.info(f'-------> Dropped rows count: {count}') elif reason == 'outlier': self.log.info(f'-------> Dropped rows count: {count}') self.log.info('Status:- |... Outlier treatment done') self.log.info(f'-------> Data Frame Shape After Dropping samples(Rows,Columns): {self.data.shape}') def log_normalization(self): if self.process_method.get('normalization', None): self.log.info(f'\nStatus:- !... Normalization treatment done') for method in cs.supported_method['normalization']: cols = [] for col, m in self.process_method['normalization'].items(): if m == method: cols.append(col) if cols and method != 'none': self.log.info(f'Running {method} on features: {cols}') def log_numerical_fill(self): if self.process_method.get('numFill', None): self.log.info(f'\nStatus:- !... 
Fillna for numeric feature done') for method in cs.supported_method['fillNa']['numeric']: cols = [] for col, m in self.process_method['numFill'].items(): if m == method: cols.append(col) if cols: self.log.info(f'-------> Running {method} on features: {cols}') def log_categorical_fill(self): if self.process_method.get('catFill', None): self.log.info(f'\nStatus:- !... FillNa for categorical feature done') for method in cs.supported_method['fillNa']['categorical']: cols = [] for col, m in self.process_method['catFill'].items(): if m == method: cols.append(col) if cols: self.log.info(f'-------> Running {method} on features: {cols}') def remove_constant_feature(self): unique_values = self.data.nunique() constant_features = [] for i, value in enumerate(unique_values): if value == 1: constant_features.append(unique_values.index[i]) if constant_features: self.drop_features(constant_features, "constant") def remove_empty_feature(self, misval_ratio=1.0): missing_ratio = self.data.isnull().sum() / len(self.data) missing_ratio = {k:v for k,v in zip(self.data.columns, missing_ratio)} empty_features = [k for k,v in missing_ratio.items() if v > misval_ratio] if empty_features: self.drop_features(empty_features, "empty") def remove_index_features(self): index_feature = [] for feat in self.numeric_feature: if self.data[feat].nunique() == len(self.data): #if (self.data[feat].sum()- sum(self.data.index) == (self.data.iloc[0][feat]-self.data.index[0])*len(self.data)): # index feature can be time based count = (self.data[feat] - self.data[feat].shift() == 1).sum() if len(self.data) - count == 1: index_feature.append(feat) self.drop_features(index_feature, "index") def fill_missing_value_method(self, colm, method): if colm in self.numeric_feature: if method in cs.supported_method['fillNa']['numeric']: if 'numFill' not in self.process_method.keys(): self.process_method['numFill'] = {} if method == 'na' and self.process_method['numFill'].get(colm, None): pass # don't overwrite else: self.process_method['numFill'][colm] = method if colm in self.cat_feature: if method in cs.supported_method['fillNa']['categorical']: if 'catFill' not in self.process_method.keys(): self.process_method['catFill'] = {} if method == 'na' and self.process_method['catFill'].get(colm, None): pass else: self.process_method['catFill'][colm] = method def check_encoding_method(self, method, colm,default=False): if not self.is_target_available() and (method.lower() == list(cs.target_encoding_method_change.keys())[0]): method = cs.target_encoding_method_change[method.lower()] if default: self.log.info(f"Applying Label encoding instead of Target encoding on feature '{colm}' as target feature is not present") return method def fill_encoder_value_method(self,colm, method, default=False): if colm in self.cat_feature: if method.lower() in cs.supported_method['categoryEncoding']: if 'catEncoder' not in self.process_method.keys(): self.process_method['catEncoder'] = {} if method == 'na' and self.process_method['catEncoder'].get(colm, None): pass else: self.process_method['catEncoder'][colm] = self.check_encoding_method(method, colm,default) else: self.log.info(f"-------> categorical encoding method '{method}' is not supported. 
supported methods are {cs.supported_method['categoryEncoding']}") def fill_normalizer_method(self,colm, method): if colm in self.numeric_feature: if method in cs.supported_method['normalization']: if 'normalization' not in self.process_method.keys(): self.process_method['normalization'] = {} if (method == 'na' or method == 'none') and self.process_method['normalization'].get(colm, None): pass else: self.process_method['normalization'][colm] = method else: self.log.info(f"-------> Normalization method '{method}' is not supported. supported methods are {cs.supported_method['normalization']}") def apply_outlier(self): inlier_indice = np.array([True] * len(self.data)) if self.process_method.get('outlier', None): self.log.info('-------> Feature wise outlier detection:') for k,v in self.process_method['outlier'].items(): if k in self.numeric_feature: if v == 'iqr': index = cs.findiqrOutlier(self.data[k]) elif v == 'zscore': index = cs.findzscoreOutlier(self.data[k]) elif v == 'disable': index = None if k in self.process_method['outlierOperation'].keys(): if self.process_method['outlierOperation'][k] == 'dropdata': inlier_indice = np.logical_and(inlier_indice, index) elif self.process_method['outlierOperation'][k] == 'average': mean = self.data[k].mean() index = ~index self.data.loc[index,[k]] = mean self.log.info(f'-------> {k}: Replaced by Mean {mean}: total replacement {index.sum()}') elif self.process_method['outlierOperation'][k] == 'nochange' and v != 'disable': self.log.info(f'-------> Total outliers in "{k}": {(~index).sum()}') if self.config.get('outlierDetection',None): if self.config['outlierDetection'].get('IsolationForest','False') == 'True': if self.numeric_feature: index = cs.findiforestOutlier(self.data[self.numeric_feature]) inlier_indice = np.logical_and(inlier_indice, index) self.log.info(f'-------> Numeric feature based Outlier detection(IsolationForest):') if inlier_indice.sum() != len(self.data): self.__update_index(inlier_indice, 'outlier') def fill_outlier_method(self,colm, method): if colm in self.numeric_feature: if method in cs.supported_method['outlier_column_wise']: if 'outlier' not in self.process_method.keys(): self.process_method['outlier'] = {} if method not in ['Disable', 'na']: self.process_method['outlier'][colm] = method else: self.log.info(f"-------> outlier detection method '{method}' is not supported for column wise. supported methods are {cs.supported_method['outlier_column_wise']}") def fill_outlier_process(self,colm, method): if colm in self.numeric_feature: if method in cs.supported_method['outlierOperation']: if 'outlierOperation' not in self.process_method.keys(): self.process_method['outlierOperation'] = {} self.process_method['outlierOperation'][colm] = method else: self.log.info(f"-------> outlier process method '{method}' is not supported for column wise. 
supported methods are {cs.supported_method['outlierOperation']}") def get_cat_imputer(self,method): if method == 'mode': return SimpleImputer(strategy='most_frequent') elif method == 'zero': return SimpleImputer(strategy='constant', fill_value=0) def get_cat_encoder(self,method): if method == 'labelencoding': return OrdinalEncoder() elif method == 'onehotencoding': return OneHotEncoder(sparse=False,handle_unknown="ignore") elif method == 'targetencoding': if not self.is_target_available(): raise ValueError('Can not apply Target Encoding when target feature is not present') return TargetEncoder() def get_num_imputer(self,method): if method == 'mode': return SimpleImputer(strategy='most_frequent') elif method == 'mean': return SimpleImputer(strategy='mean') elif method == 'median': return SimpleImputer(strategy='median') elif method == 'knnimputer': return KNNImputer() elif method == 'zero': return SimpleImputer(strategy='constant', fill_value=0) def get_num_scaler(self,method): if method == 'minmax': return MinMaxScaler() elif method == 'standardscaler': return StandardScaler() elif method == 'lognormal': return PowerTransformer(method='yeo-johnson', standardize=False) def recommenderStartProfiler(self,modelFeatures): return cs.recommenderStartProfiler(self,modelFeatures) def folderPreprocessing(self,folderlocation,folderdetails,deployLocation): return cs.folderPreprocessing(self,folderlocation,folderdetails,deployLocation) def textSimilarityStartProfiler(self, doc_col_1, doc_col_2): return cs.textSimilarityStartProfiler(self, doc_col_1, doc_col_2) def get_conversion_method(self): return cs.get_one_true_option(self.config.get('textConversionMethod','')).lower() def set_features(features,profiler=None): return cs.set_features(features,profiler)
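A minimal usage sketch for the profiler class above, run from this module's namespace. The toy DataFrame and the config values are illustrative assumptions, not a shipped sample; the config keys mirror those read by fill_default_steps and check_config.

# Minimal, hypothetical usage sketch for the profiler class above.
import pandas as pd

df = pd.DataFrame({
    'age': [25.0, 32.0, None, 47.0],
    'city': ['delhi', 'pune', 'pune', None],
    'label': ['yes', 'no', 'yes', 'no'],
})
df['city'] = df['city'].astype('category')    # keep it out of the text pipeline
config = {
    'numericalFillMethod': {'median': 'True'},
    'categoricalFillMethod': {'mode': 'True'},
    'categoryEncoding': {'labelencoding': 'True'},
    'normalization': {'minmax': 'True'},
    'textConversionMethod': {'TF_IDF': 'True'},   # read even when no text columns exist
}
prof = profiler(df, target='label', encode_target=True, config=config)
processed_df, pipe, label_enc = prof.transform()
print(processed_df.head())   # imputed, encoded and scaled frame plus encoded target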
data_profiler_functions.py
import os
import sys
import numpy as np
import scipy.stats
import pandas as pd
from pathlib import Path

default_config = {
    'misValueRatio': '1.0',
    'numericFeatureRatio': '1.0',
    'categoryMaxLabel': '20',
    'str_to_cat_len_max': 10
}

target_encoding_method_change = {'targetencoding': 'labelencoding'}

supported_method = {
    'fillNa': {
        'categorical': ['mode', 'zero', 'na'],
        'numeric': ['median', 'mean', 'knnimputer', 'zero', 'drop', 'na'],
    },
    'categoryEncoding': ['labelencoding', 'targetencoding', 'onehotencoding', 'na', 'none'],
    'normalization': ['standardscaler', 'minmax', 'lognormal', 'na', 'none'],
    'outlier_column_wise': ['iqr', 'zscore', 'disable', 'na'],
    'outlierOperation': ['dropdata', 'average', 'nochange']
}

def findiqrOutlier(df):
    Q1 = df.quantile(0.25)
    Q3 = df.quantile(0.75)
    IQR = Q3 - Q1
    index = ~((df < (Q1 - 1.5 * IQR)) | (df > (Q3 + 1.5 * IQR)))
    return index

def findzscoreOutlier(df):
    z = np.abs(scipy.stats.zscore(df))
    index = (z < 3)
    return index

def findiforestOutlier(df):
    from sklearn.ensemble import IsolationForest
    isolation_forest = IsolationForest(n_estimators=100)
    isolation_forest.fit(df)
    y_pred_train = isolation_forest.predict(df)
    return y_pred_train == 1

def get_one_true_option(d, default_value=None):
    if isinstance(d, dict):
        for k, v in d.items():
            if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
                return k
    return default_value

def get_boolean(value):
    if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True):
        return True
    else:
        return False

def recommenderStartProfiler(self, modelFeatures):
    try:
        self.log.info('----------> FillNA:0')
        self.data = self.data.fillna(value=0)
        self.log.info('Status:- !... Missing value treatment done')
        self.log.info('----------> Remove Empty Row')
        self.data = self.data.dropna(axis=0, how='all')
        self.log.info('Status:- !... Empty feature treatment done')
        userId, itemId, rating = modelFeatures.split(',')
        self.data[itemId] = self.data[itemId].astype(np.int32)
        self.data[userId] = self.data[userId].astype(np.int32)
        self.data[rating] = self.data[rating].astype(np.float32)
        return self.data
    except Exception as inst:
        self.log.info("Error: dataProfiler failed " + str(inst))
        return self.data

def folderPreprocessing(self, folderlocation, folderdetails, deployLocation):
    try:
        dataset_directory = Path(folderlocation)
        dataset_csv_file = dataset_directory / folderdetails['label_csv_file_name']
        tfrecord_directory = Path(deployLocation) / 'Video_TFRecord'
        from savp import PreprocessSAVP
        import csv
        csvfile = open(dataset_csv_file, newline='')
        csv_reader = csv.DictReader(csvfile)
        PreprocessSAVP(dataset_directory, csv_reader, tfrecord_directory)
        dataColumns = list(self.data.columns)
        VideoProcessing = True
        return dataColumns, VideoProcessing, tfrecord_directory
    except Exception as inst:
        self.log.info("Error: dataProfiler failed " + str(inst))

def textSimilarityStartProfiler(self, doc_col_1, doc_col_2):
    try:
        features = [doc_col_1, doc_col_2]
        pipe = None
        dataColumns = list(self.data.columns)
        self.numofCols = self.data.shape[1]
        self.numOfRows = self.data.shape[0]
        from transformations.textProfiler import textProfiler
        self.log.info('-------> Execute Fill NA With Empty String')
        self.data = self.data.fillna(value=" ")
        self.log.info('Status:- |... Missing value treatment done')
        self.data[doc_col_1] = textProfiler().textCleaning(self.data[doc_col_1])
        self.data[doc_col_2] = textProfiler().textCleaning(self.data[doc_col_2])
        self.log.info('-------> Concatenate: ' + doc_col_1 + ' ' + doc_col_2)
        self.data['text'] = self.data[[doc_col_1, doc_col_2]].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)
        from tensorflow.keras.preprocessing.text import Tokenizer
        pipe = Tokenizer()
        pipe.fit_on_texts(self.data['text'].values)
        self.log.info('-------> Tokenizer: Fit on Concatenate Field')
        self.log.info('Status:- |... Tokenizer the text')
        self.data[doc_col_1] = self.data[doc_col_1].astype(str)
        self.data[doc_col_2] = self.data[doc_col_2].astype(str)
        return (self.data, pipe, self.target_name, features)
    except Exception as inst:
        self.log.info("StartProfiler failed " + str(inst))
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))

def set_features(features, profiler=None):
    if profiler:
        features = [x for x in features if x not in profiler.added_features]
        return features + profiler.text_feature
    return features
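A small self-check of the stateless helpers above; values are illustrative and assume this module's namespace.

# Illustrative self-check for the helpers above.
import pandas as pd

s = pd.Series([10, 11, 12, 13, 200])                      # 200 lies outside the IQR fence
print(findiqrOutlier(s).tolist())                         # [True, True, True, True, False]
print(get_one_true_option({'median': 'True', 'mean': 'False'}))   # 'median'
print(get_boolean('TRUE'))                                # True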
aionfls.py
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import logging
import os
import sys
from flwr.common.logger import log
from logging import INFO
from flserver import flserver

class aionfls:
    def __init__(self):
        self.confdata = None

    def configLoad(self, jsonfile):
        import json
        jsonfile = str(jsonfile)
        with open(jsonfile, 'r') as file:
            self.confdata = json.load(file)
        return self.confdata

    def dataload(self, datapath):
        df = pd.read_csv(datapath)
        ## Data preprocess in test dataset; in AION, the AION profiler will handle it.
        df = df[~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)]
        df = df.reset_index(drop=True)
        return df

# Start Flower server for n rounds of federated learning
if __name__ == "__main__":
    classobj = aionfls()
    json_file = sys.argv[1]
    confdata = classobj.configLoad(json_file)
    data_location = confdata["data_location"]
    # deploy_location = confdata['deploy_location']
    cwd = os.path.abspath(os.path.dirname(__file__))
    model_name = confdata['model_name']
    version = str(confdata['version'])
    file_name = model_name + '_' + version + ".log"
    try:
        fl_log = os.path.normpath(os.path.join(cwd, 'logs', file_name))
    except Exception as e:
        # the 'AION' logger is not configured yet at this point, so fall back to print
        print("Log path error. Error Msg:", e)
        sys.exit(1)
    logging.basicConfig(filename=fl_log, format='%(asctime)s %(message)s',
                        filemode='w', level=logging.DEBUG)
    classobj.log = logging.getLogger('AION')
    print("===============")
    print("flserver main function")
    print("===============")
    if confdata['evaluation_required'].lower() == 'false':
        df = None
        aionflobj = flserver(df, confdata)
        print("flserver condition is false")
    else:
        ## User selected option is True
        print("flserver condition is true")
        data_location = os.path.normpath(os.path.join(cwd, data_location))
        df = classobj.dataload(data_location)
        aionflobj = flserver(df, confdata)
    status = aionflobj.runFLServer()
    classobj.log.info("Aion FL Server run Status: \n" + str(status))
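The entry point above is launched as `python aionfls.py config.json`. The keys below are taken from configload() in flserver.py later in this dump; the values are placeholders, not a shipped sample configuration.

# Illustrative config writer; every value here is an assumption.
import json

sample_config = {
    "server_IP": "127.0.0.1",
    "server_port": "8080",
    "model_name": "logisticregression",
    "problem_type": "classification",
    "version": "1",
    "fl_round": 10,
    "min_available_clients": 2,
    "min_fit_clients": 2,
    "data_location": "data/train.csv",
    "selected_feature": "f1,f2,f3",
    "target_feature": "label",
    "evaluation_required": "True",
    "model_hyperparams": {"penalty": "l2", "max_iter": 100},
}
with open("config.json", "w") as f:
    json.dump(sample_config, f, indent=2)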
dl_model.py
import tensorflow as tf

def dl_regression_model(input_shape, output_shape, optimizer, loss_func, act_func):
    inputs = tf.keras.Input(shape=(input_shape,))
    x = tf.keras.layers.Dense(64, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(inputs)
    x = tf.keras.layers.Dense(32, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(16, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(8, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    outputs = tf.keras.layers.Dense(output_shape, kernel_initializer='he_normal', bias_initializer='zeros')(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile(loss=loss_func, optimizer=optimizer,
                  metrics=["mean_absolute_error", "mean_squared_error"])
    return model

def dl_multiClass_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func):
    inputs = tf.keras.Input(shape=(input_shape,))
    x = tf.keras.layers.Dense(64, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(inputs)
    x = tf.keras.layers.Dense(32, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(16, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(8, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    outputs = tf.keras.layers.Dense(output_shape, kernel_initializer='he_normal', bias_initializer='zeros', activation=last_act_func)(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer, loss_func, metrics=["accuracy"])
    return model

def dl_binary_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func):
    inputs = tf.keras.Input(shape=(input_shape,))
    x = tf.keras.layers.Dense(64, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(inputs)
    x = tf.keras.layers.Dense(32, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(16, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(8, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    outputs = tf.keras.layers.Dense(output_shape, kernel_initializer='he_normal', bias_initializer='zeros', activation=last_act_func)(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer, loss_func, metrics=["accuracy"])
    return model
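A quick usage example for the builders above; the argument values are illustrative.

# Build and inspect the regression network (illustrative hyperparameters).
model = dl_regression_model(
    input_shape=10,                   # number of input features
    output_shape=1,                   # single regression target
    optimizer='adam',
    loss_func='mean_squared_error',
    act_func='relu',
)
model.summary()                       # 10 -> 64 -> 32 -> 16 -> 8 -> 1 dense funnel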
flserver.py
# -*- coding: utf-8 -*- """ Created on Wed Jun 15 14:36:11 2022 @author: @aionteam """ import flwr import flwr as fl import tensorflow as tf from typing import Any, Callable, Dict, List, Optional, Tuple import utils from sklearn.metrics import log_loss from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, mean_absolute_error,r2_score from typing import Dict import numpy as np import logging import os # import sys from flwr.common.logger import log from logging import INFO import pickle as pkl from flwr.server.client_proxy import ClientProxy import dl_model from sklearn.preprocessing import StandardScaler import pandas as pd ## Below import can be used when aion specific grpc communication used. # from aionflgrpcserver import aionflgrpcserver # Make TensorFlow logs less verbose os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" os.environ["GRPC_VERBOSITY"] = "debug" # """ AION Federated Learning Server. Geting weights from clients, aggregate the weights by FedAvg algorithm and update the client model.""" class flserver(): def __init__(self,df,confdata): self.log = logging.getLogger('AION') ## Below params will be used later self.confparams=confdata self.df=df self.fl_round=0 print("Inside flserver init func") ## Flower server number of rounds for fl model update (clients-server) def fit_round(self,rnd: int) -> Dict: """Send round number to client.""" self.fl_round=rnd log(INFO, "===========================") self.log.info("federated learning round: "+str(rnd)) log(INFO, "federated learning round: %s ",str(rnd)) log(INFO, "===========================") # print(f"federated learning round: {rnd}") return {"rnd": rnd} def fit_config(self,rnd: int) -> Dict: """Send round number to client.""" self.round_count = rnd log(INFO, "===========================") log(INFO, "Starting round %s ...",str(rnd)) log(INFO, "===========================") model_hyperparams = self.confparams["model_hyperparams"] batch_size = model_hyperparams["batch_size"] local_epochs = model_hyperparams["epochs"] config = { "batch_size": int(batch_size), # "local_epochs": 1 if rnd < 2 else 2, "local_epochs": int(local_epochs), "rnd": rnd, } return config def evaluate_config(self, rnd: int): model_hyperparams = self.confparams["model_hyperparams"] val_batch_size = model_hyperparams["batch_size"] # val_steps = 5 if rnd < 4 else 10 return {"val_batch_size": int(val_batch_size)} ## Loading configuration parameters def configload(self,confparams): try: data=confparams server_ip=str(data["server_IP"]) server_port=str(data["server_port"]) model_name=str(data["model_name"]) num_clients=int(data["min_available_clients"]) min_fit_clients=int(data["min_fit_clients"]) num_train_round=int(data["fl_round"]) data_location=str(data["data_location"]) model_params=data["model_hyperparams"] problem_type=data["problem_type"] server_address=f"{server_ip}:{server_port}" # model_location=str(data["model_store"]) model_version=str(data["version"]) selected_feature=data["selected_feature"] if (type(selected_feature) is str): selected_feature=selected_feature.split(',') target_feature=data["target_feature"] evaluation_required=data["evaluation_required"] self.log.info("Federated Learning <Server IP:Port> "+str(server_address)) except Exception as e: log(INFO, "Reading server config file issue. 
Err.Msg: %s ",str(e)) return server_address,model_name,num_clients,min_fit_clients,num_train_round,data_location,model_params,problem_type,model_version,selected_feature,target_feature,evaluation_required ## Save the final model def model_save(self,model,model_name,problem_type,version): cwd = os.path.abspath(os.path.dirname(__file__)) model_location=os.path.join(cwd, 'models') model_name=model_name version=str(version) model_name=self.confparams["model_name"] if (model_name.lower() == "deeplearning"): file_name = model_name + '_' +problem_type+'_'+version+ ".h5" else: file_name=file_name = model_name + '_' +problem_type+'_'+version+".sav" saved_model=os.path.normpath(os.path.join(model_location,file_name)) self.log.info("saved_model path: "+str(saved_model)) try: with open (saved_model,'wb') as f: pkl.dump(model,f) return True except Exception as e: self.log.info("fl server model save error. Error Msg: "+str(e)) return False ## Load the model, not used now. If user want to use aion trained model for evaluation at serverside, use this fn. def model_load(self, path): model_name=self.confparams["model_name"] if (model_name.lower() == "deeplearning"): loaded_model = tf.keras.models.load_model(path) else: loaded_model = pkl.load(open(path, 'rb')) return loaded_model # Fo normal ml models, def get_eval_fn, evaluate each round results with own dataset. It is optional, without this, fed server will aggregate (fedAvg) client weights and update results to clients without evaluate. def get_eval_fn(self,model,X,y,model_name,model_version): """Return an evaluation function for server-side evaluation.""" self.log.info("X_eval: \n"+str(X.shape)) self.log.info("y_eval: \n"+str(y.shape)) # scaler = StandardScaler() # X_scaled = scaler.fit_transform(X) # y = pd.get_dummies(y) # y_class = None def evaluate(server_round: int, parameters: fl.common.NDArrays, config: Dict[str, fl.common.Scalar],): # self.log.info("server side fedavg weights \n "+str(parameters)) try: problem_type=self.confparams["problem_type"] # if (self.model_name.lower() == 'logisticregression' ): # loss = log_loss(y, model.predict_proba(X)) # else: # loss = log_loss(y, model.predict(X)) if (problem_type.lower() == 'classification'): if (model_name.lower() == 'logisticregression' ): utils.set_model_params(model, parameters) loss = log_loss(y, model.predict_proba(X)) # loss = log_loss(y, model.predict_proba(X)) accuracy = model.score(X, y) log(INFO, "Server evaluation FL Round: %s processed Weights. 
-- Loss: %s, -- Accuracy: %s ",str(self.fl_round),str(loss), str(accuracy)) self.log.info("Accuracy: "+str(accuracy)) self.log.info("model coefficients: "+str(model.coef_)) self.log.info("model intercept: "+str(model.intercept_)) problem_type=self.confparams["problem_type"] self.model_save(model,model_name,problem_type,model_version) return loss, {"accuracy": accuracy} else: if (model_name.lower() == 'linearregression' ): print(model, type(model)) print(model.get_params) # rmse = mean_squared_error(y, model.predict(X), square=True) rmse = np.sqrt(mean_squared_error(y, model.predict(X))) mae = mean_absolute_error(y, model.predict(X)) r2=r2_score(y, model.predict(X)) loss = rmse mse=mean_squared_error(y, model.predict(X)) rmse = np.sqrt(mean_squared_error(y, model.predict(X))) mae = mean_absolute_error(y, model.predict(X)) r2=r2_score(y, model.predict(X)) loss = rmse results = { "mean_absolute_error": mae, "mean_squared_error": mse, "root_mean_squared_error": rmse, "r2":r2, } # accuracy=r2 log(INFO, "Server evaluation FL Round: %s processed Weights. -- Loss: %s, -- metrics: %s ",str(self.fl_round),str(rmse), str(results)) self.log.info("model coefficients: "+str(model.coef_)) self.log.info("model intercept: "+str(model.intercept_)) self.model_save(model,model_name,problem_type,model_version) # return loss, len(X), results return loss, results except Exception as e: log(INFO, "evaluate error msg: %s ",str(e)) return evaluate # for deep learn models, def get_eval_fn, evaluate each round results with own dataset. It is optional, without this, fed server will aggregate (fedAvg) client weights and update results to clients without evaluate. def get_eval_fn_dl(self, model,X,y,model_name,model_version): try: scaler = StandardScaler() X_scaled = scaler.fit_transform(X) # y = pd.get_dummies(y) y_class = None def evaluate( server_round: int, weights: fl.common.NDArrays, config: Dict[str, fl.common.Scalar], ) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]: # Update model with the latest parameters model.set_weights(weights) problem_type = self.confparams["problem_type"] self.model_save(model, model_name,problem_type, model_version) if model_name == 'deeplearning': if problem_type == 'regression': loss, mean_absolute_error, mean_squared_error = model.evaluate(X_scaled, y, verbose=1) y_pred = model.predict(X_scaled) from sklearn import metrics root_mean_squared_error = np.sqrt(metrics.mean_squared_error(y, y_pred)) log(INFO, "global model mean_absolute_error: %f ",mean_absolute_error) log(INFO, "global model mean_squared_error: %f ",mean_squared_error) log(INFO, "global model root_mean_squared_error: %f ",root_mean_squared_error) return loss, {"mean_absolute_error": mean_absolute_error, "mean_squared_error": mean_squared_error, "root_mean_squared_error": root_mean_squared_error} if problem_type == 'classification': y_class = pd.get_dummies(y) loss, accuracy = model.evaluate(X_scaled, y_class, verbose=1) log(INFO, "global model accuracy: %f ",round(accuracy * 100, 2)) log(INFO, "global model loss: %f ", round(loss, 2)) return loss, {"accuracy": accuracy} except Exception as e: log(INFO, "get_eval_fn_dl error: %s ",str(e)) return evaluate """ Below part is the aion specific grpc functions. To start the grpc server and client. Currently below modules are not used. 
""" # def callaiongrpcserver(self): # agrpcobj = aionflgrpcserver() # status=agrpcobj.startgrpcerver() # print("server grpc start status: \t",status) # return status # def stopaiongrpcserver(self): # agrpcobj = aionflgrpcserver() # status=agrpcobj.shutserver() # print("server grpc stop status: \t",status) # return status ## This function called from aionflmain.py, and run server. ## Getting flower fl strategy def get_strategy(self,min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn): strategy = fl.server.strategy.FedAvg( min_fit_clients=min_fit_clients, min_available_clients=num_clients, evaluate_fn=eval_fn, on_fit_config_fn=on_fit_config_fn, on_evaluate_config_fn=on_evaluate_config_fn, # initial_parameters=fl.common.weights_to_parameters(model.get_weights()), ) return strategy def runFLServer(self): try: server_address,model_name,num_clients,min_fit_clients,num_train_round,data_location,model_params,problem_type,model_version,selected_feature,target_feature,evaluation_required = self.configload(self.confparams) df = self.df if (evaluation_required.lower() == 'true'): ## One more check for NaN,Inf occurance in dataframe df =df[~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)] ## Remove index if passed. df=df.reset_index(drop=True) y=df[target_feature] X=df[selected_feature] if (problem_type.lower() == "classification"): if (model_name.lower() == "logisticregression"): #n_classes = df[target_feature].nunique() no_classes = len(df.groupby(target_feature).count()) no_features=len(selected_feature) self.log.info("no_classes: "+str(no_classes)) self.log.info("no_features: "+str(no_features)) modelName="logisticregression" try: model = LogisticRegression(**model_params, warm_start=True) except Exception as e: self.log.info("LR model error: \n"+str(e)) status=utils.setmodelName(modelName) utils.set_initial_params(model,no_classes,no_features) eval_fn=self.get_eval_fn(model,X,y,model_name,model_version) on_fit_config_fn=self.fit_round on_evaluate_config_fn=None min_fit_clients=2 strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn) elif (model_name.lower() == "deeplearning"): # model_hyperparams = self.confparams["model_hyperparams"] optimizer = model_params["optimizer"] loss_func = model_params["losses"] act_func = model_params["activation"] last_act_func = model_params["last_activation"] input_shape = X.shape[1] # len(selected_feature) output_shape = len(y.value_counts()) model = None if output_shape == 2: if last_act_func == "sigmoid" and loss_func == "binary_crossentropy": model = dl_model.dl_binary_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func) elif last_act_func == "softmax" and loss_func == "categorical_crossentropy": model = dl_model.dl_binary_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func) else: model = dl_model.dl_multiClass_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func) print(model.summary()) eval_fn=self.get_eval_fn_dl(model,X,y,model_name,model_version) on_fit_config_fn=self.fit_config on_evaluate_config_fn=self.evaluate_config strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn) elif(problem_type.lower() == "regression"): if (model_name.lower() == "linearregression"): model=LinearRegression(**model_params) status=utils.setmodelName(model_name) utils.set_initial_params_reg(model,X.shape[0],len(selected_feature)) # 
utils.set_initial_params_reg(model,X.shape[0],X.shape[1]) eval_fn=self.get_eval_fn(model,X,y,model_name,model_version) on_fit_config_fn=self.fit_round on_evaluate_config_fn=None min_fit_clients=2 strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn) elif(model_name.lower() == "deeplearning"): input_shape = X.shape[1] # len(selected_feature) output_shape = len(y.value_counts()) optimizer = model_params["optimizer"] loss_func = model_params["losses"] act_func = model_params["activation"] model = None model = dl_model.dl_regression_model(input_shape, 1, optimizer, loss_func, act_func) eval_fn=self.get_eval_fn_dl(model,X,y,model_name,model_version) on_fit_config_fn=self.fit_config on_evaluate_config_fn=self.evaluate_config strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn) elif (evaluation_required.lower() == 'false'): eval_fn=None if (model_name.lower() == "deeplearning"): # min_fit_clients =int( model_params["min_fit_clients"]) on_fit_config_fn=self.fit_config on_evaluate_config_fn=self.evaluate_config strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn) else: min_fit_clients=0 on_fit_config_fn=self.fit_round on_evaluate_config_fn=None # strategy = self.get_strategy(min_fit_clients,num_clients,eval_fn,on_fit_config_fn,on_evaluate_config_fn) strategy = fl.server.strategy.FedAvg( min_available_clients=num_clients, eval_fn=None, on_fit_config_fn=self.fit_round,) else: log(INFO, "Please opt server evaluation as True or False in server configuration file.") log(INFO, "Federated learning Server started at @: %s ",str(server_address)) server_rnd=1 while (1): try: fl.server.start_server(server_address=server_address, strategy=strategy, config=fl.server.ServerConfig(num_rounds=num_train_round))# config={"num_rounds": num_train_round})#config=fl.server.ServerConfig(num_rounds=3) #,force_final_distributed_eval=True) except Exception as e: log(INFO, "Server exception: %s ",str(e)) log(INFO, "AION federated learning server completed for execution cycle: %s ",str(server_rnd)) # Evaluate the final trained model server_rnd+=1 log(INFO, "AION federated learning server execution successfully completed. Please check the log file for more information.") return True except Exception as e: self.log.info("AION Federated Learning Server run error. 
Error Msg: "+str(e)) log(INFO, "Server not executing, err.msg: %s ",str(e)) return False # Start Flower server for n rounds of federated learning # if __name__ == "__main__": # ''' Testing purpose code ''' # super_obj=flserver1() # json_file=sys.argv[1] # super_obj.log.info("User json_file: \n"+str(json_file)) # # configfile=None # server_address,model_name,num_clients,num_train_round,data_location,model_version,model_version,selected_feature,target_feature = super_obj.configload(super_obj.confparams) # df = pd.read_csv(data_location) # # df=super_obj.df # df =df[~df.isin([np.nan, np.inf, -np.inf]).any(1)] # df=df.reset_index(drop=True) # y=df[target_feature] # X = df.drop(target_feature, axis=1) # no_classes = len(df.groupby(target_feature).count()) # no_features=len(selected_feature.split(',')) # print("no_classes: \n",no_classes) # print("no_features: \n",no_features) # # num_classes = y_train.apply(pd.Series.nunique) # if (model_name.lower() == "logisticregression"): # modelName="logisticregression" # model = LogisticRegression(penalty="l2",max_iter=10, warm_start=True) # ## May be used in future (model load for server side eval) # # model=super_obj.model_load(model_location) # status=utils.setmodelName(modelName) # utils.set_initial_params(model,no_classes,no_features) # strategy = fl.server.strategy.FedAvg( # min_available_clients=num_clients, # eval_fn=super_obj.get_eval_fn(model,X,y), # on_fit_config_fn=super_obj.fit_round,) # # super_obj.log.info("Stating federated learning server.....\n") # log(INFO, "Stating AION federated learning server.....") # fl.server.start_server(server_address, strategy=strategy, config={"num_rounds": num_train_round}) # # super_obj.log.info("federated learning server execution completed.\n") # log(INFO, "AION federated learning server execution completed.....")
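For context, a minimal sketch of a Flower client that the server above could aggregate. This is illustrative only (AION ships its own client code); it assumes flwr's standard NumPyClient API and reuses the parameter helpers from utils.py below.

# Hypothetical matching client sketch, assuming flwr's NumPyClient API.
import flwr as fl
from sklearn.linear_model import LogisticRegression
import utils

class SklearnFLClient(fl.client.NumPyClient):
    def __init__(self, model, X, y):
        self.model, self.X, self.y = model, X, y

    def get_parameters(self, config):
        return utils.get_model_parameters(self.model)

    def fit(self, parameters, config):
        utils.set_model_params(self.model, parameters)
        self.model.fit(self.X, self.y)            # one round of local training
        return utils.get_model_parameters(self.model), len(self.X), {}

    def evaluate(self, parameters, config):
        utils.set_model_params(self.model, parameters)
        accuracy = self.model.score(self.X, self.y)
        return 1.0 - accuracy, len(self.X), {"accuracy": accuracy}

# model = LogisticRegression(max_iter=10, warm_start=True)
# utils.set_initial_params(model, no_classes=2, no_features=X.shape[1])
# fl.client.start_numpy_client(server_address="127.0.0.1:8080",
#                              client=SklearnFLClient(model, X, y))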
utils.py
from typing import Tuple, Union, List
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from flwr.common.logger import log
from logging import INFO

XY = Tuple[np.ndarray, np.ndarray]
Dataset = Tuple[XY, XY]
LogRegParams = Union[XY, Tuple[np.ndarray]]
XYList = List[XY]

modelUsed = None
modelname = None

def setmodelName(modelselected):
    # assign the module-level variables, not function locals
    global modelUsed, modelname
    try:
        modelname = str(modelselected)
        print("setmodelName, given modelname:", modelname)
        if modelname.lower() == 'logisticregression':
            modelUsed = LogisticRegression()
            return True
        elif modelname.lower() == "linearregression":
            modelUsed = LinearRegression()
            return True
        elif modelname.lower() == "sgdclassifier":
            modelUsed = SGDClassifier()
            return True
        elif modelname.lower() == "knn":
            modelUsed = KNeighborsClassifier()
            return True
        elif modelname.lower() == "decisiontreeclassifier":
            modelUsed = DecisionTreeClassifier()
            return True
        else:
            return False
    except Exception as e:
        log(INFO, "set fl model name fn issue: %s", e)
        return False

def get_model_parameters(model) -> LogRegParams:
    """Returns the parameters of a sklearn LogisticRegression model."""
    if model.fit_intercept:
        params = (model.coef_, model.intercept_)
    else:
        params = (model.coef_,)
    return params

def set_model_params(model, params: LogRegParams):
    """Sets the parameters of a sklearn LogisticRegression model."""
    model.coef_ = params[0]
    try:
        if model.fit_intercept:
            model.intercept_ = params[1]
    except Exception as e:
        log(INFO, "set_model_params fn issue: %s", e)
    return model

def set_initial_params_reg(model, no_vals, no_features):
    """Sets initial parameters as zeros.

    Required since model params are uninitialized until model.fit is called,
    but the server asks clients for initial parameters at launch. Refer to the
    sklearn.linear_model.LinearRegression documentation for more information.
    """
    n_features = no_features
    model.coef_ = np.zeros(n_features)
    try:
        if model.fit_intercept:
            model.intercept_ = np.zeros((no_vals,))
    except Exception as e:
        log(INFO, "set_initial_params fn issue: %s", e)

def set_initial_params(model, no_classes, no_features):
    """Sets initial parameters as zeros.

    Required since model params are uninitialized until model.fit is called,
    but the server asks clients for initial parameters at launch. Refer to the
    sklearn.linear_model.LogisticRegression documentation for more information.
    """
    n_classes = no_classes
    n_features = no_features
    model.classes_ = np.array([i for i in range(n_classes)])
    model.coef_ = np.zeros((n_classes, n_features))
    try:
        if model.fit_intercept:
            model.intercept_ = np.zeros((n_classes,))
    except Exception as e:
        log(INFO, "set_initial_params fn issue: %s", e)

def shuffle(X: np.ndarray, y: np.ndarray) -> XY:
    """Shuffle X and y."""
    rng = np.random.default_rng()
    idx = rng.permutation(len(X))
    return X[idx], y[idx]

def partition(X: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList:
    """Split X and y into a number of partitions."""
    return list(
        zip(np.array_split(X, num_partitions), np.array_split(y, num_partitions))
    )
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
incBatchLearning.py
import sys
import os
import pickle
import json
import timeit
import warnings
import traceback
import logging
from pathlib import Path
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas import json_normalize
import shutil
from word2number import w2n
from pytz import timezone
import datetime
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, accuracy_score, r2_score, mean_absolute_error, mean_squared_error, recall_score, precision_score, f1_score
from river import stream

class incBatchLearner():
    def __init__(self):
        self.home = os.path.dirname(os.path.abspath(__file__))
        self.configPath = os.path.join(self.home, 'production', 'Config.json')
        self.configDict = {}
        self.updConfigDict = None
        self.incFillPath = os.path.join(self.home, 'production', 'profiler', 'incFill.pkl')
        self.incOutlierRemPath = os.path.join(self.home, 'production', 'profiler', 'incOutlierRem.pkl')
        self.incLabelMappingPath = os.path.join(self.home, 'production', 'profiler', 'incLabelMapping.pkl')
        self.incCatEncoderPath = os.path.join(self.home, 'production', 'profiler', 'incCatEncoder.pkl')
        self.incScalerPath = os.path.join(self.home, 'production', 'profiler', 'incScaler.pkl')
        self.testPath = os.path.join(self.home, 'data', 'test.csv')
        self.modelName = ''
        self.incFill = None
        self.incLabelMapping = None
        self.incCatEncoder = None
        self.incScaler = None
        self.incOutlierRem = None
        self.model = None
        self.targetCol = None
        self.numFtrs = []
        self.catFtrs = []
        self.allFtrs = []
        self.wordToNumericCols = []
        self.logFileName = os.path.join(self.home, 'log', 'model_training_logs.log')
        filehandler = logging.FileHandler(self.logFileName, 'a', 'utf-8')
        formatter = logging.Formatter('%(message)s')
        filehandler.setFormatter(formatter)
        self.log = logging.getLogger('eion')
        self.log.propagate = False
        self.log.addHandler(filehandler)
        self.log.setLevel(logging.INFO)

    def readData(self, data, isTest=False):
        if not isTest:
            self.log.info('New Data Path: ' + str(data))
        else:
            self.log.info('Test Data Path: ' + str(data))
        startTime = timeit.default_timer()
        if os.path.splitext(data)[1] == ".tsv":
            df = pd.read_csv(data, encoding='utf-8', sep='\t')
        elif os.path.splitext(data)[1] in [".csv", ".dat"]:
            df = pd.read_csv(data, encoding='utf-8')
        else:
            if os.path.splitext(data)[1] == ".json":
                with open(data, 'r', encoding='utf-8') as f:
                    jsonData = json.load(f)
            else:
                jsonData = json.loads(data)
            df = json_normalize(jsonData)
        dataLoadTime = timeit.default_timer() - startTime
        self.log.info('\nData Load time(sec) :' + str(dataLoadTime))
        self.log.info('\nFirst ten rows of new data')
        self.log.info(df.head(10))
        self.log.info('Data Frame shape: ' + str(df.shape))
        df.rename(columns=lambda x: x.strip(), inplace=True)
        return df

    def readConfig(self):
        with open(self.configPath, 'r', encoding='utf8') as f:
            self.configDict = json.load(f)
        self.configDict['partialFit'] += 1
        self.log.info('************* Partial Fit ' + str(self.configDict['partialFit']) + ' *************** \n')
        msg = '-------> Execution Start Time: ' + datetime.datetime.now(timezone("Asia/Kolkata")).strftime('%Y-%m-%d %H:%M:%S' + ' IST')
        self.log.info(msg)
        self.targetCol = self.configDict['targetCol']
        if 'numFtrs' in self.configDict:
            self.numFtrs = self.configDict['numFtrs']
        if 'catFtrs' in self.configDict:
            self.catFtrs = self.configDict['catFtrs']
        if 'allNumCols' in self.configDict:
            self.allNumCols = self.configDict['allNumCols']
        if 'allCatCols' in self.configDict:
            self.allCatCols = self.configDict['allCatCols']
        if 'encCols' in self.configDict:
            self.encCols = self.configDict['encCols']
        if 'wordToNumCols' in self.configDict:
            self.wordToNumericCols = self.configDict['wordToNumCols']
        self.emptyFtrs = self.configDict['emptyFtrs']
        if 'encTarget' in self.configDict:
            self.encTarget = self.configDict['encTarget']
        if 'noOfClasses' in self.configDict:
            self.allClasses = list(range(int(self.configDict['noOfClasses'])))
        self.misval_ratio = self.configDict['misval_ratio']
        self.allFtrs = self.configDict['allFtrs']
        self.modelName = self.configDict['modelName']
        self.problemType = self.configDict['problemType']
        self.modelPath = os.path.join(self.home, 'production', 'model', self.modelName + '.pkl')
        self.scoreParam = self.configDict['scoreParam']
        self.score = self.configDict['score']

    def pickleLoad(self, file, filename):
        if os.path.exists(file):
            with open(file, 'rb') as f:
                model = pickle.load(f)
            file_size = os.path.getsize(file)
            self.log.info(str(filename) + " size is :" + str(file_size) + " bytes")
            return model
        else:
            return None

    def s2n(self, value):
        try:
            x = eval(value)
            return x
        except:
            try:
                return w2n.word_to_num(value)
            except:
                return np.nan

    def convertWordToNumeric(self, dataframe, feature):
        try:
            dataframe[feature] = dataframe[feature].apply(lambda x: self.s2n(x))
            return dataframe
        except Exception as inst:
            self.log.info("convertWordToNumeric Failed ===>" + str(inst))
            return dataframe

    def pickleDump(self, model, path):
        if model is not None:
            with open(path, 'wb') as f:
                pickle.dump(model, f)

    def splitTrainTest(self, X, y):
        if self.problemType.lower() == 'regression':
            xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, shuffle=True)
        else:
            try:
                xtrain, xtest, ytrain, ytest = train_test_split(X, y, stratify=y, test_size=0.2, shuffle=True)
            except:
                xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, shuffle=True)
        return xtrain, xtest, ytrain, ytest

    def loadSavedModels(self):
        self.incFill = self.pickleLoad(self.incFillPath, 'Online Missing Value Filler')
        self.incLabelMapping = self.pickleLoad(self.incLabelMappingPath, 'Online Label Encoder')
        self.incCatEncoder = self.pickleLoad(self.incCatEncoderPath, 'Online Categorical Encoder')
        self.incScaler = self.pickleLoad(self.incScalerPath, 'Online Scaler')
        self.incOutlierRem = self.pickleLoad(self.incOutlierRemPath, 'Online Outlier Detector')
        self.model = self.pickleLoad(self.modelPath, str(os.path.basename(self.modelPath))[:-4])
        self.log.info('\nData Profiler and ML models loaded in Memory')

    def saveModels(self):
        os.makedirs(os.path.join(self.home, 'production', 'profiler'), exist_ok=True)
        os.makedirs(os.path.join(self.home, 'production', 'model'), exist_ok=True)
        if isinstance(self.configDict['num_fill'], dict) or isinstance(self.configDict['cat_fill'], dict):
            self.pickleDump(self.incFill, self.incFillPath)
        self.pickleDump(self.incLabelMapping, self.incLabelMappingPath)
        self.pickleDump(self.incCatEncoder, self.incCatEncoderPath)
        self.pickleDump(self.incScaler, self.incScalerPath)
        self.pickleDump(self.incOutlierRem, self.incOutlierRemPath)
        self.pickleDump(self.model, self.modelPath)
        self.log.info('Models saved into production')

    def saveConfig(self):
        with open(self.configPath, 'w', encoding='utf8') as f:
            json.dump(self.updConfigDict, f, ensure_ascii=False)

    def apply_river_model(self, x, profModel, isTest):
        if not isTest:
            profModel.learn_one(x)
        return pd.Series(profModel.transform_one(x))

    def apply_enc(self, x, isTest):
        if not isTest:
            y = x[self.encTarget]
            self.incCatEncoder.learn_one(x, y)
        return pd.Series(self.incCatEncoder.transform_one(x))

    def apply_od_pipe(self, x):
        score = self.incOutlierRem.score_one(x)
        is_anomaly = self.incOutlierRem.classify(score)
        self.incOutlierRem.learn_one(x)
        return is_anomaly

    def dataFramePreProcess(self, df):
        df = df.replace('-', np.NaN)
        df = df.replace('?', np.NaN)
        df = df.replace(r'^\s*$', np.NaN, regex=True)
        columns = list(df.columns)
        if self.wordToNumericCols:
            for ftr in self.wordToNumericCols:
                if ftr in columns:
                    self.log.info('Converting ' + ftr + ' to numeric type...')
                    tempDataFrame = df.copy(deep=True)
                    testDf = self.convertWordToNumeric(tempDataFrame, ftr)
                    try:
                        df[ftr] = testDf[ftr].astype(float)
                    except:
                        pass
        columns = list(df.columns)
        for empCol in self.emptyFtrs:
            if empCol in columns:
                df = df.drop(columns=[empCol])
        columns = list(df.columns)
        self.log.info('Detecting Missing Values')
        nonNAArray = []
        numOfRows = df.shape[0]
        for i in columns:
            numNa = df.loc[(pd.isna(df[i])), i].shape[0]
            nonNAArray.append(tuple([i, numNa]))
        self.missingCols = []
        self.emptyCols = []
        for item in nonNAArray:
            numofMissingVals = item[1]
            if numofMissingVals != 0:
                self.log.info('-------> Feature ' + str(item[0]))
                self.log.info('----------> Number of Empty Rows ' + str(numofMissingVals))
                self.missingCols.append(item[0])
                if numofMissingVals >= numOfRows * self.misval_ratio:
                    self.log.info('----------> Empty: Yes')
                    self.log.info('----------> Permitted Rows: ' + str(int(numOfRows * self.misval_ratio)))
                    self.emptyCols.append(item[0])
        if len(self.missingCols) != 0:
            self.log.info('----------- Detecting for Missing Values End -----------\n')
        else:
            self.log.info('-------> Missing Value Features :Not Any')
            self.log.info('----------- Detecting for Missing Values End -----------\n')
        return df

    def profiler(self, df, isTest=False):
        if not isTest:
            self.log.info('Starting profiling of New Training Data')
        else:
            self.log.info('Starting profiling of Testing Data')
        startTime = timeit.default_timer()
        df = self.dataFramePreProcess(df)
        if 'num_fill' in self.configDict:
            if self.configDict['num_fill'] == 'drop':
                df = df.dropna(axis=0, subset=self.allNumCols)
            elif self.configDict['num_fill'] == 'zero':
                df[self.allNumCols] = df[self.allNumCols].fillna(value=0.0)
            else:
                df[self.allNumCols] = df[self.allNumCols].apply(pd.to_numeric)
                df = df.astype(object).where(df.notna(), None)  # river expects nan values to be None
                df[self.allNumCols] = df[self.allNumCols].apply(lambda row: self.apply_river_model(row.to_dict(), self.incFill['num_fill'], isTest), axis='columns')
                if not isTest:
                    self.updConfigDict['num_fill'] = {col: self.incFill['num_fill'].stats[col].get() for col in self.allNumCols}
        if 'cat_fill' in self.configDict:
            if self.configDict['cat_fill'] == 'drop':
                df = df.dropna(axis=0, subset=self.allCatCols)
            elif self.configDict['cat_fill'] == 'zero':
                df[self.allCatCols] = df[self.allCatCols].fillna(value=0.0)
            else:
                df = df.astype(object).where(df.notna(), None)
                df[self.allCatCols] = df[self.allCatCols].apply(lambda row: self.apply_river_model(row.to_dict(), self.incFill['cat_fill'], isTest), axis='columns')
                if not isTest:
                    self.updConfigDict['cat_fill'] = {col: self.incFill['cat_fill'].stats[col].get() for col in self.allCatCols}
        if not isTest:
            self.log.info('Missing value profiler model updated')
        if self.incLabelMapping:
            uq_classes = df[self.targetCol].unique()
            le_classes = list(self.incLabelMapping.classes_)
            uq_classes = [type(le_classes[0])(x) for x in uq_classes]
            unseen_classes = set(uq_classes) - set(le_classes)
            self.log.info('Existing classes: ' + str(le_classes))
            if len(unseen_classes) > 0:
                self.log.info('New unseen classes: ' + str(unseen_classes))
                le_classes.extend(unseen_classes)
                from sklearn.preprocessing import LabelEncoder
                self.incLabelMapping = LabelEncoder()
                self.incLabelMapping.fit(le_classes)
                self.log.info(self.incLabelMapping.classes_)
                self.log.info('Label encoder refitted with new unseen classes')
            df[self.targetCol] = df[self.targetCol].apply(str)
            df[self.targetCol] = self.incLabelMapping.transform(df[self.targetCol])
            if not isTest:
                self.log.info('Target column label encoding is done')
        if self.incCatEncoder:
            if self.problemType.lower() == 'regression':
                from sklearn.preprocessing import StandardScaler
                sc = StandardScaler()
                self.encTarget = 'scaledTarget'
                df['scaledTarget'] = sc.fit_transform(df[self.targetCol].to_numpy().reshape(-1, 1))
            transformed_data = df[self.encCols].apply(lambda row: self.apply_enc(row.to_dict(), isTest), axis='columns')
            if self.targetCol in transformed_data.columns:
                transformed_data.drop(self.targetCol, inplace=True, axis=1)
            df[self.catFtrs] = transformed_data
            if not isTest:
                # record the encoder state on the working copy so saveConfig() persists it
                self.updConfigDict['catEnc'] = []
                if len(self.catFtrs) == 1:
                    col = self.catFtrs[0]
                    self.updConfigDict['catEnc'].append({col: self.incCatEncoder['TargetAgg'].state.to_dict()})
                else:
                    for i, col in enumerate(self.catFtrs):
                        no = '' if i == 0 else str(i)
                        self.updConfigDict['catEnc'].append({col: self.incCatEncoder['TransformerUnion']['TargetAgg' + no].state.to_dict()})
                self.log.info('Categorical encoding is done and profiler model updated')
        if self.incScaler:
            if not isTest:
                self.incScaler = self.incScaler.partial_fit(df[self.numFtrs])
                self.log.info('Numerical features scaled and profiler model updated')
            df[self.numFtrs] = self.incScaler.transform(df[self.numFtrs])
        if self.incOutlierRem and not isTest:
            df = df[df[self.numFtrs].apply(lambda x: False if self.apply_od_pipe(x.to_dict()) else True, axis=1)]
            df.reset_index(drop=True, inplace=True)
            self.log.info('Outliers removed and profiler model updated')
        if not isTest:
            self.log.info('Check config file in production folder for updated profiler values')
        profilerTime = timeit.default_timer() - startTime
        self.log.info('\nProfiling time(sec) :' + str(profilerTime))
        return df

    def riverTrain(self, X, Y):
        trainStream = stream.iter_pandas(X, Y)
        for i, (xi, yi) in enumerate(trainStream):
            if yi is not None:
                self.model.learn_one(xi, yi)

    def riverEvaluate(self, xtest):
        testStream = stream.iter_pandas(xtest)
        preds = []
        for xi, yi in testStream:
            pred = self.model.predict_one(xi)
            preds.append(pred)
        return preds

    def trainModel(self, df):
        startTime = timeit.default_timer()
        X = df[self.allFtrs]
        Y = df[self.targetCol]
        try:
            self.riverTrain(X, Y)
            trainTime = timeit.default_timer() - startTime
            self.log.info('\nModel Training time(sec) :' + str(trainTime))
            self.log.info(self.modelName + ' model updated')
            self.log.info('First fit model params are ' + str(self.configDict['modelParams']))
        except Exception as e:
            raise e

    def archiveModels(self):
        source = os.path.join(self.home, 'production')
        archivePath = os.path.join(self.home, 'archives')
        if os.path.isdir(archivePath):
            NoOfArchives = sum(os.path.isdir(os.path.join(self.home, 'archives', str(i))) for i in os.listdir(archivePath))
            destination = os.path.join(self.home, 'archives', str(NoOfArchives + 1))
        else:
            destination = os.path.join(archivePath, '1')
        if not os.path.exists(destination):
            os.makedirs(destination)
        allfiles = os.listdir(source)
        for f in allfiles:
            src_path = os.path.join(source, f)
            dst_path = os.path.join(destination, f)
            shutil.move(src_path, dst_path)
        self.log.info('Previous production models archived')

    def get_score(self, metric, actual, predict):
        if 'accuracy' in str(metric).lower():
            score = accuracy_score(actual, predict)
            score = score * 100
        elif 'recall' in str(metric).lower():
            score = recall_score(actual, predict, average='macro')
            score = score * 100
        elif 'precision' in str(metric).lower():
            score = precision_score(actual, predict, average='macro')
            score = score * 100
        elif 'f1_score' in str(metric).lower():
            score = f1_score(actual, predict, average='macro')
            score = score * 100
        elif 'roc_auc' in str(metric).lower():
            try:
                score = roc_auc_score(actual, predict, average="macro")
            except:
                try:
                    actual = pd.get_dummies(actual)
                    predict = pd.get_dummies(predict)
                    score = roc_auc_score(actual, predict, average='weighted', multi_class='ovr')
                except:
                    score = 0
            score = score * 100
        elif ('mse' in str(metric).lower()) or ('neg_mean_squared_error' in str(metric).lower()):
            score = mean_squared_error(actual, predict)
        elif ('rmse' in str(metric).lower()) or ('neg_root_mean_squared_error' in str(metric).lower()):
            score = mean_squared_error(actual, predict, squared=False)
        elif ('mae' in str(metric).lower()) or ('neg_mean_absolute_error' in str(metric).lower()):
            score = mean_absolute_error(actual, predict)
        elif 'r2' in str(metric).lower():
            score = r2_score(actual, predict) * 100
        return round(score, 2)

    def checkColumns(self, df):
        self.log.info('Checking DataColumns in new data')
        dfCols = list(df.columns)
        allCols = self.allFtrs.copy()
        allCols.append(self.targetCol)
        missingCols = []
        for col in allCols:
            if col not in dfCols:
                missingCols.append(col)
        if len(missingCols) > 0:
            raise Exception('DataFrame is missing columns: ' + str(missingCols))
        else:
            self.log.info('All required columns are present: ' + str(list(dfCols)[:500]))

    def plotMetric(self):
        y = self.configDict['metricList']
        fedrows = self.configDict['trainRowsList']
        fig = plt.figure()
        ax = fig.gca()
        if self.configDict['problemType'] == 'classification':
            ax.set_yticks(np.arange(0, 110, 10))
            plt.ylim(ymin=0)
        if self.configDict['problemType'] == 'regression':
            minMet = min(y)
            maxMet = max(y)
            plt.ylim(minMet - 10, maxMet + 10)
        plt.plot(y)
        plt.ylabel(self.scoreParam)
        plt.xlabel('Partial Fits')
        plt.title(str(self.scoreParam) + ' over training rows')
        if type(fedrows[0]) != type(''):
            fedrows = [str(x) for x in fedrows]
        x = list(range(len(fedrows)))
        for i in range(len(fedrows)):
            plt.annotate(fedrows[i], (x[i], y[i] + 5))
            if self.configDict['problemType'] == 'classification':
                plt.annotate(round(y[i], 1), (x[i], y[i] - 3))
        plt.grid()
        plt.savefig(os.path.join(self.home, 'production', 'metric'))
        return

    def updateLearning(self, data):
        try:
            self.readConfig()
            self.updConfigDict = self.configDict.copy()
            df = self.readData(data)
            self.checkColumns(df)
            self.loadSavedModels()
            X = df[self.allFtrs]
            y = df[self.targetCol]
            xtrain, xtest, ytrain, ytest = self.splitTrainTest(X, y)
            dftrain = pd.concat((xtrain, ytrain), axis=1)
            dftest = pd.concat((xtest, ytest), axis=1)
            dftrain = self.profiler(dftrain)
            dftest = self.profiler(dftest, isTest=True)
            xtest = dftest[self.allFtrs]
            ytest = dftest[self.targetCol]
            self.trainModel(dftrain)
            preds = self.riverEvaluate(xtest)
            score = self.get_score(self.scoreParam, ytest, preds)
            self.updConfigDict['score'] = score
            self.log.info('Previous ' + self.scoreParam + ': ' + str(self.configDict['score']))
            self.log.info('Current ' + self.scoreParam + ': ' + str(self.updConfigDict['score']))
            self.configDict['trainRowsList'].append(self.configDict['trainRowsList'][-1] + xtrain.shape[0])
            self.log.info('Number of data points trained on so far: ' + str(self.configDict['trainRowsList'][-1]))
            self.configDict['metricList'].append(self.updConfigDict['score'])
            self.archiveModels()
            self.plotMetric()
            self.saveModels()
            self.saveConfig()
            msg = self.scoreParam + ': Previous:' + str(self.configDict['score']) + ' Current:' + str(self.updConfigDict['score'])
            output = {"status": "SUCCESS", "Msg": msg}
            self.log.info(str(output))
        except Exception as e:
            print(traceback.format_exc())
            self.log.info('Partial Fit Failed ' + str(traceback.format_exc()))
            if self.updConfigDict is not None:
                self.saveConfig()
            output = {"status": "FAIL", "Msg": str(e).strip('"')}
        return json.dumps(output)


if __name__ == "__main__":
    incBLObj = incBatchLearner()
    output = incBLObj.updateLearning(sys.argv[1])
    print("aion_learner_status:", output)
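The learner above is driven entirely by the artifacts of a previous first fit. A minimal invocation sketch, assuming a populated production folder next to the script; the batch file path is a placeholder, not part of this module:

# Hypothetical driver for the incremental learner. 'new_batch.csv' and the
# pre-existing ./production artifacts (Config.json + pickled profiler/model)
# are assumptions, not shipped with this file.
from incBatchLearning import incBatchLearner

learner = incBatchLearner()
status_json = learner.updateLearning('new_batch.csv')  # same entry point the CLI uses
print(status_json)  # JSON string, e.g. {"status": "SUCCESS", "Msg": "..."}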
incBatchPrediction.py
import sys
import os
import pickle
import json
import traceback
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import scipy
from pandas import json_normalize
from word2number import w2n
from river import stream

class incBatchPredictor():
    def __init__(self):
        self.home = os.path.dirname(os.path.abspath(__file__))
        self.configPath = os.path.join(self.home, 'production', 'Config.json')
        self.configDict = {}
        self.incFillPath = os.path.join(self.home, 'production', 'profiler', 'incFill.pkl')
        self.incLabelMappingPath = os.path.join(self.home, 'production', 'profiler', 'incLabelMapping.pkl')
        self.incCatEncoderPath = os.path.join(self.home, 'production', 'profiler', 'incCatEncoder.pkl')
        self.incScalerPath = os.path.join(self.home, 'production', 'profiler', 'incScaler.pkl')
        self.incFill = None
        self.incLabelMapping = None
        self.incCatEncoder = None
        self.incScaler = None
        self.model = None
        self.targetCol = None
        self.modelName = ''
        self.problemType = ''
        self.numFtrs = []
        self.catFtrs = []
        self.wordToNumericCols = []

    def readData(self, data):
        try:
            if os.path.splitext(data)[1] == ".tsv":
                df = pd.read_csv(data, encoding='utf-8', sep='\t')
            elif os.path.splitext(data)[1] in [".csv", ".dat"]:
                df = pd.read_csv(data, encoding='utf-8')
            else:
                if os.path.splitext(data)[1] == ".json":
                    with open(data, 'r', encoding='utf-8') as f:
                        jsonData = json.load(f)
                else:
                    jsonData = json.loads(data)
                df = json_normalize(jsonData)
            df.rename(columns=lambda x: x.strip(), inplace=True)
            return df
        except KeyError as e:
            output = {"status": "FAIL", "message": str(e).strip('"')}
            print(json.dumps(output))
        except Exception as e:
            output = {"status": "FAIL", "message": str(e).strip('"')}
            print(json.dumps(output))

    def readConfig(self):
        with open(self.configPath, 'r', encoding='utf8') as f:
            self.configDict = json.load(f)
        self.targetCol = self.configDict['targetCol']
        if 'numFtrs' in self.configDict:
            self.numFtrs = self.configDict['numFtrs']
        if 'catFtrs' in self.configDict:
            self.catFtrs = self.configDict['catFtrs']
        if 'allNumCols' in self.configDict:
            self.allNumCols = self.configDict['allNumCols']
        if 'allCatCols' in self.configDict:
            self.allCatCols = self.configDict['allCatCols']
        if 'wordToNumCols' in self.configDict:
            self.wordToNumericCols = self.configDict['wordToNumCols']
        self.emptyFtrs = self.configDict['emptyFtrs']
        self.allFtrs = self.configDict['allFtrs']
        self.modelName = self.configDict['modelName']
        self.problemType = self.configDict['problemType']
        self.modelPath = os.path.join(self.home, 'production', 'model', self.modelName + '.pkl')
        self.scoreParam = self.configDict['scoreParam']
        self.score = self.configDict['score']

    def pickleLoad(self, file):
        if os.path.exists(file):
            with open(file, 'rb') as f:
                model = pickle.load(f)
            return model
        else:
            return None

    def s2n(self, value):
        try:
            x = eval(value)
            return x
        except:
            try:
                return w2n.word_to_num(value)
            except:
                return np.nan

    def convertWordToNumeric(self, dataframe, feature):
        try:
            dataframe[feature] = dataframe[feature].apply(lambda x: self.s2n(x))
            return dataframe
        except Exception as inst:
            # this class has no logger, so report the failure on stdout
            print("convertWordToNumeric Failed ===>" + str(inst))
            return dataframe

    def loadSavedModels(self):
        self.incFill = self.pickleLoad(self.incFillPath)
        self.incLabelMapping = self.pickleLoad(self.incLabelMappingPath)
        self.incCatEncoder = self.pickleLoad(self.incCatEncoderPath)
        self.incScaler = self.pickleLoad(self.incScalerPath)
        self.model = self.pickleLoad(self.modelPath)

    def apply_river_model(self, x, profModel):
        return pd.Series(profModel.transform_one(x))

    def apply_enc(self, x):
        return pd.Series(self.incCatEncoder.transform_one(x))

    def dataFramePreProcess(self, df):
        df = df.replace(r'^\s*$', np.NaN, regex=True)
        df = df.replace('-', np.nan)
        df = df.replace('?', np.nan)
        columns = list(df.columns)
        if self.wordToNumericCols:
            for ftr in self.wordToNumericCols:
                if ftr in columns:
                    tempDataFrame = df.copy(deep=True)
                    testDf = self.convertWordToNumeric(tempDataFrame, ftr)
                    try:
                        df[ftr] = testDf[ftr].astype(float)
                    except:
                        pass
        columns = list(df.columns)
        for empCol in self.emptyFtrs:
            if empCol in columns:
                df = df.drop(columns=[empCol])
        return df

    def profiler(self, df):
        df = df[self.allFtrs]
        df = self.dataFramePreProcess(df)
        if 'num_fill' in self.configDict:
            if self.configDict['num_fill'] == 'drop':
                df = df.dropna(axis=0, subset=self.allNumCols)
            elif self.configDict['num_fill'] == 'zero':
                df[self.numFtrs] = df[self.numFtrs].fillna(value=0.0)
            else:
                for x in self.numFtrs:
                    if x == self.targetCol:
                        continue
                    df[x] = df[x].fillna(value=self.configDict['num_fill'][x])
        if 'cat_fill' in self.configDict:
            if self.configDict['cat_fill'] == 'drop':
                df = df.dropna(axis=0, subset=self.allCatCols)
            elif self.configDict['cat_fill'] == 'zero':
                df[self.catFtrs] = df[self.catFtrs].fillna(value=0.0)
            else:
                for x in self.catFtrs:
                    if x == self.targetCol:
                        continue
                    df[x] = df[x].fillna(value=self.configDict['cat_fill'][x])
        if self.incCatEncoder:
            transformed_data = df[self.catFtrs].apply(lambda row: self.apply_enc(row.to_dict()), axis='columns')
            df[self.catFtrs] = transformed_data
        if self.incScaler:
            df[self.numFtrs] = self.incScaler.transform(df[self.numFtrs])
        return df

    def trainedModel(self, X):
        testStream = stream.iter_pandas(X)
        preds = []
        if self.problemType.lower() == 'regression':
            for xi, yi in testStream:
                try:
                    pred = self.model.predict_proba_one(xi)
                except:
                    pred = self.model.predict_one(xi)
                preds.append(pred)
            preds = pd.DataFrame(preds)
            return preds
        elif self.problemType.lower() == 'classification':
            for xi, yi in testStream:
                try:
                    pred = self.model.predict_proba_one(xi)
                    preds.append(pred)
                except:
                    continue
            out = pd.DataFrame(preds)
            return out

    def apply_output_format(self, df, modeloutput):
        if self.problemType.lower() == 'regression':
            df['prediction'] = modeloutput[0]
            df['prediction'] = df['prediction'].round(2)
        elif self.problemType.lower() == 'classification':
            modeloutput = round(modeloutput, 2)
            if modeloutput.shape[1] == 1:
                df['prediction'] = modeloutput
                df['prediction'] = df['prediction'].astype(int)
            else:
                try:
                    predCol = modeloutput.idxmax(axis=1)
                    df['prediction'] = predCol.astype(int)
                    df['prediction'] = self.incLabelMapping.inverse_transform(df['prediction'])
                except:
                    df['prediction'] = modeloutput.idxmax(axis=1)
                df['probability'] = modeloutput.max(axis=1).round(2)
                modeloutput.columns = modeloutput.columns.astype(int)
                modeloutput.columns = self.incLabelMapping.inverse_transform(list(modeloutput.columns))
                df['remarks'] = modeloutput.apply(lambda x: x.to_json(), axis=1)
        outputjson = df.to_json(orient='records')
        outputjson = {"status": "SUCCESS", "data": json.loads(outputjson)}
        return json.dumps(outputjson)

    def predict(self, data):
        try:
            df = self.readData(data)
            dfOrg = df.copy()
            self.readConfig()
            if len(self.configDict) != 0:
                self.loadSavedModels()
                df = self.profiler(df)
                modeloutput = self.trainedModel(df)
                dfOrg = dfOrg[self.allFtrs]
                output = self.apply_output_format(dfOrg, modeloutput)
            else:
                output = {"status": "FAIL", "message": "Production config is empty"}
        except Exception as e:
            print(traceback.format_exc())
            output = {"status": "FAIL", "message": str(e).strip('"')}
        return output


if __name__ == "__main__":
    incBPobj = incBatchPredictor()
    output = incBPobj.predict(sys.argv[1])
    print("predictions:", output)
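A matching scoring sketch; the input path is a placeholder and the production artifacts written by the learner are assumed to exist:

# Hypothetical use of the batch predictor above; mirrors its __main__ entry point.
from incBatchPrediction import incBatchPredictor

predictor = incBatchPredictor()
result = predictor.predict('scoring_batch.csv')  # placeholder path
print(result)  # JSON with per-row 'prediction' (plus 'probability' for classification)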
dl_model.py
import tensorflow as tf

def dl_regression_model(input_shape, output_shape, optimizer, loss_func, act_func):
    inputs = tf.keras.Input(shape=(input_shape,))
    x = tf.keras.layers.Dense(64, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(inputs)
    x = tf.keras.layers.Dense(32, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(16, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(8, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    outputs = tf.keras.layers.Dense(output_shape, kernel_initializer='he_normal', bias_initializer='zeros')(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile(loss=loss_func, optimizer=optimizer, metrics=["mean_absolute_error", "mean_squared_error"])
    return model

def dl_multiClass_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func):
    inputs = tf.keras.Input(shape=(input_shape,))
    x = tf.keras.layers.Dense(64, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(inputs)
    x = tf.keras.layers.Dense(32, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(16, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(8, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    outputs = tf.keras.layers.Dense(output_shape, kernel_initializer='he_normal', bias_initializer='zeros', activation=last_act_func)(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer, loss_func, metrics=["accuracy"])
    return model

def dl_binary_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func):
    inputs = tf.keras.Input(shape=(input_shape,))
    x = tf.keras.layers.Dense(64, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(inputs)
    x = tf.keras.layers.Dense(32, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(16, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    x = tf.keras.layers.Dense(8, kernel_initializer='he_normal', bias_initializer='zeros', activation=act_func)(x)
    outputs = tf.keras.layers.Dense(output_shape, kernel_initializer='he_normal', bias_initializer='zeros', activation=last_act_func)(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer, loss_func, metrics=["accuracy"])
    return model
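These builders only assemble and compile the network; a short sketch of calling one of them with illustrative hyperparameters (the choices below are examples, not defaults prescribed by this module):

# Build the binary classifier with 10 input features and 2 output units.
import dl_model

model = dl_model.dl_binary_classification(
    input_shape=10, output_shape=2, optimizer='adam',
    loss_func='categorical_crossentropy', act_func='relu', last_act_func='softmax')
model.summary()  # 4 hidden Dense layers (64/32/16/8) + softmax output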
aionflc.py
# -*- coding: utf-8 -*-
"""
Created on Wed May 25 21:16:54 2022

@author: @aionteam
"""
import tensorflow as tf
import warnings
import flwr as flower
import numpy as np
import pandas as pd
import os
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
import utils
import logging
from flwr.common.logger import log
from logging import INFO
import time
import pickle as pkl
import json
import sys
import random
import string
from sklearn.preprocessing import StandardScaler
import dl_model
from sklearn import metrics
## The below import can be used when aion-specific grpc communication is used.
# from aiongrpcclient import aiongrpcclient

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
os.environ["GRPC_VERBOSITY"] = "debug"
logger = logging.getLogger('AION')

""" The below aion fl client is for the sklearn process """
class aionflc(flower.client.NumPyClient):
    def __init__(self, model, num_rounds, model_name, version, wait_time, client_id, num_records, model_overwrite, problem_type, X_train, X_test, y_train, y_test):
        self.count = 0
        self.num_rounds = round(num_rounds)
        self.model_name = model_name
        self.version = version
        self.wait_time = int(wait_time)
        self.client_id = client_id
        self.num_records = num_records
        self.model_overwrite = model_overwrite
        self.model = model
        self.problem_type = problem_type
        self.X_train, self.X_test, self.y_train, self.y_test = X_train, X_test, y_train, y_test

    # Save the final model. With model_overwrite == 'false' each round gets its own
    # versioned file; with 'true' a single file (self.version) is overwritten.
    def model_save(self, model):
        ## Locate standard model dir to save model
        cwd = os.path.abspath(os.path.dirname(__file__))
        model_location = os.path.join(cwd, 'models')
        try:
            os.makedirs(model_location)
        except FileExistsError:
            # model_location already exists
            pass
        model_name = self.model_name
        if self.model_overwrite.lower() == 'false':
            version = str(self.count)
        elif self.model_overwrite.lower() == 'true':
            version = str(self.version)
        else:
            ## Write own user instruction
            return False
        if model_name.lower() == "deeplearning":
            file_name = model_name + '_' + self.problem_type + '_' + version + ".h5"
            saved_model = os.path.normpath(os.path.join(model_location, file_name))
            log(INFO, "flclient saved_model path: %s ", str(saved_model))
            try:
                model.save(saved_model)
                return True
            except Exception as e:
                logger.info("model save error. Err.Msg: " + str(e))
                return False
        else:
            file_name = model_name + '_' + self.problem_type + '_' + version + ".sav"
            saved_model = os.path.normpath(os.path.join(model_location, file_name))
            log(INFO, "flclient saved_model path: %s ", str(saved_model))
            try:
                with open(saved_model, 'wb') as f:
                    pkl.dump(model, f)
                return True
            except Exception as e:
                logger.info("model save error. Err.Msg: " + str(e))
                return False

    def get_parameters(self, config):
        return utils.get_model_parameters(self.model)

    def get_properties(self, model, time_out):
        """Return the current client properties."""
        client_info = {'client_id': self.client_id}
        time_out = 100
        return client_info, model, time_out

    def fit(self, parameters, config):
        utils.set_model_params(self.model, parameters)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            num_partitions = round(self.num_rounds)
            xtrain = np.array_split(self.X_train, num_partitions)[self.count]
            ytrain = np.array_split(self.y_train, num_partitions)[self.count]
            self.model.fit(xtrain, ytrain)
        time.sleep(self.wait_time)
        self.count += 1
        print("-- Received Weights from Server.")
        print(f"\n Training finished for FL round: {config['rnd']}.\n")
        logger.info("Training finished for FL round: " + str(config['rnd']) + " -- Received Weights from Server")
        model_param = utils.get_model_parameters(self.model)
        model_param = list(model_param)
        return model_param, len(self.X_train), {}

    def evaluate(self, parameters, config):
        utils.set_model_params(self.model, parameters)
        if self.problem_type.lower() == 'classification':
            if self.model_name.lower() == 'logisticregression':
                loss = log_loss(self.y_test, self.model.predict_proba(self.X_test))
                accuracy = self.model.score(self.X_test, self.y_test)
                print(f"{self.client_id} Sending weights -- data processed {self.num_records}, -- Loss: {loss}. -- accuracy: {accuracy}. ")
                logger.info(str(self.client_id) + " Sending weights -- data processed " + str(self.num_records) + ".-- Loss: " + str(loss) + ". -- accuracy: " + str(accuracy))
                logger.info("FL Client model intercept: " + str(self.model.intercept_))
                logger.info("FL Client model coefficients: " + str(self.model.coef_))
                self.model_save(self.model)
                return loss, len(self.X_test), {"accuracy": accuracy}
        elif self.problem_type.lower() == 'regression':
            if self.model_name.lower() == 'linearregression':
                mse = mean_squared_error(self.y_test, self.model.predict(self.X_test))
                rmse = np.sqrt(mse)
                mae = mean_absolute_error(self.y_test, self.model.predict(self.X_test))
                r2 = r2_score(self.y_test, self.model.predict(self.X_test))
                loss = rmse
                results = {
                    "mean_absolute_error": mae,
                    "mean_squared_error": mse,
                    "root_mean_squared_error": rmse,
                    "r2": r2,
                }
                print(f"{self.client_id} Sending weights -- data processed {self.num_records}, -- Loss: {rmse}. -- metrics: {results}. ")
                logger.info(str(self.client_id) + " Sending weights -- data processed " + str(self.num_records) + ".-- Loss: " + str(rmse) + ". -- metrics: " + str(results))
                logger.info("FL Client model intercept: " + str(self.model.intercept_))
                logger.info("FL Client model coefficients: " + str(self.model.coef_))
                self.model_save(self.model)
                return loss, len(self.X_test), results

"""
The below aion fl client is for the deep learning process.
Why a different client for sklearn and deep learning? Because flower calls the
client object and processes all functions (get_parameters, fit and evaluate)
internally, so in user space we cannot combine both (sklearn and dl) using if..else.
"""
class aionflc_dl(flower.client.NumPyClient):
    def __init__(self, model, num_rounds, model_name, version, wait_time, client_id, num_records, model_overwrite, problem_type, X_train, X_test, y_train, y_test, model_params):
        self.count = 0
        self.num_rounds = round(num_rounds)
        self.model_name = model_name
        self.version = version
        self.wait_time = int(wait_time)
        self.client_id = client_id
        self.num_records = num_records
        self.model_overwrite = model_overwrite
        self.model = model
        self.problem_type = problem_type
        self.X_train, self.X_test, self.y_train, self.y_test = X_train, X_test, y_train, y_test
        self.model_params = model_params

    # Save the final model (same versioning logic as aionflc.model_save)
    def model_save(self, model):
        cwd = os.path.abspath(os.path.dirname(__file__))
        model_location = os.path.join(cwd, 'models')
        try:
            os.makedirs(model_location)
        except FileExistsError:
            pass
        model_name = self.model_name
        if self.model_overwrite.lower() == 'false':
            version = str(self.count)
        elif self.model_overwrite.lower() == 'true':
            version = str(self.version)
        else:
            ## Write own user instruction
            return False
        if model_name.lower() == "deeplearning":
            file_name = model_name + '_' + self.problem_type + '_' + version + ".h5"
            saved_model = os.path.normpath(os.path.join(model_location, file_name))
            log(INFO, "flclient saved_model path: %s ", str(saved_model))
            try:
                model.save(saved_model)
                return True
            except Exception as e:
                logger.info("model save error. Err.Msg: " + str(e))
                return False
        else:
            file_name = model_name + '_' + self.problem_type + '_' + version + ".sav"
            saved_model = os.path.normpath(os.path.join(model_location, file_name))
            log(INFO, "flclient saved_model path: %s ", str(saved_model))
            try:
                with open(saved_model, 'wb') as f:
                    pkl.dump(model, f)
                return True
            except Exception as e:
                logger.info("model save error. Err.Msg: " + str(e))
                return False

    def get_parameters(self, config):
        """Get parameters of the local model."""
        return self.model.get_weights()

    def get_properties(self, model, time_out):
        """Return the current client properties."""
        client_info = {'client_id': self.client_id}
        time_out = 100
        return client_info, model, time_out

    def fit(self, parameters, config):
        """Train parameters on the locally held training set."""
        # Update local model parameters
        self.model.set_weights(parameters)
        num_partitions = self.num_rounds
        xtrain = np.array_split(self.X_train, num_partitions)[self.count]
        ytrain = np.array_split(self.y_train, num_partitions)[self.count]
        # Get hyperparameters for this round
        batch_size: int = int(self.model_params["batch_size"])
        epochs: int = int(self.model_params["epochs"])
        log(INFO, "===========================")
        log(INFO, "Start training model on local client %s round %i", self.client_id, config['rnd'])
        time.sleep(self.wait_time)
        self.count += 1
        # Train the model using hyperparameters from config
        history = self.model.fit(
            xtrain, ytrain, batch_size, epochs, shuffle=False,
            validation_data=(self.X_test, self.y_test), verbose=1
        )
        # Return updated model parameters and results
        parameters_prime = self.model.get_weights()
        num_examples_train = len(self.X_train)
        model_name = self.model_name
        problem_type = self.problem_type
        results = {}
        if model_name == "deeplearning":
            if problem_type == "classification":
                acc = self.model.history.history['val_accuracy']
                log(INFO, "Validated accuracy at the end of current round of client %s : %.2f %%", self.client_id, acc[-1] * 100)
                log(INFO, "Finished training model on local client %s", self.client_id)
                results = {
                    "loss": history.history["loss"][0],
                    "accuracy": history.history["accuracy"][0],
                    "val_loss": history.history["val_loss"][0],
                    "val_accuracy": history.history["val_accuracy"][0],
                }
            if problem_type == "regression":
                mean_absolute_error = history.history['mean_absolute_error'][0]
                mean_squared_error = history.history['mean_squared_error'][0]
                y_pred = self.model.predict(self.X_test)
                root_mean_squared_error = np.sqrt(metrics.mean_squared_error(self.y_test, y_pred))
                log(INFO, "Mean Absolute Error at the end of current round of client %s : %f", self.client_id, mean_absolute_error)
                log(INFO, "Mean Squared Error at the end of current round of client %s : %f", self.client_id, mean_squared_error)
                log(INFO, "Root Mean Squared Error at the end of current round of client %s : %f", self.client_id, root_mean_squared_error)
                log(INFO, "Finished training model on local client %s", self.client_id)
                results = {
                    "mean_absolute_error": mean_absolute_error,
                    "mean_squared_error": mean_squared_error,
                    "root_mean_squared_error": root_mean_squared_error,
                }
        return parameters_prime, num_examples_train, results

    def evaluate(self, parameters, config):
        """Evaluate parameters on the locally held test set."""
        # Update local model with global parameters
        self.model.set_weights(parameters)
        batch_size: int = int(self.model_params["batch_size"])
        steps: int = np.ceil(len(self.X_test) / batch_size)
        num_examples_test = len(self.X_test)
        log(INFO, "Run for only %i steps", steps)
        # Evaluate global model parameters on the local test data and return results
        model_name = self.model_name
        problem_type = self.problem_type
        self.model_save(self.model)
        if model_name == "deeplearning":
            if problem_type == "classification":
                loss, accuracy = self.model.evaluate(self.X_test, self.y_test, verbose=0)
                log(INFO, "Client %s : Accuracy %.2f %%", self.client_id, accuracy * 100)
                log(INFO, "Client %s : Loss %.4f ", self.client_id, loss)
                return loss, num_examples_test, {"accuracy": accuracy}
            if problem_type == "regression":
                loss, mean_absolute_error, mean_squared_error = self.model.evaluate(self.X_test, self.y_test, steps=steps, verbose=1)
                y_pred = self.model.predict(self.X_test)
                root_mean_squared_error = np.sqrt(metrics.mean_squared_error(self.y_test, y_pred))
                log(INFO, "Client %s : mean_absolute_error %f ", self.client_id, mean_absolute_error)
                log(INFO, "Client %s : mean_squared_error %f ", self.client_id, mean_squared_error)
                log(INFO, "Client %s : root_mean_squared_error %f ", self.client_id, root_mean_squared_error)
                return loss, num_examples_test, {"mean_absolute_error": mean_absolute_error, "mean_squared_error": mean_squared_error, "root_mean_squared_error": root_mean_squared_error}

def randclientid(s, c):
    c = string.ascii_uppercase + string.digits
    return ''.join(random.choice(c) for x in range(s))

## Loading input data
def dataLoad(jsonfile):
    with open(jsonfile, 'r') as file:
        data = json.load(file)
    server_ip = str(data["server_IP"])
    server_port = str(data["server_port"])
    model_name = str(data["model_name"])
    problem_type = str(data["problem_type"])
    data_location = str(data["data_location"])
    model_params = data["model_hyperparams"]
    train_size = int(data["train_size"])
    model_version = str(data["version"])
    selected_feature = data["selected_feature"]
    if type(selected_feature) is str:
        selected_feature = selected_feature.split(',')
    model_overwrite = data['model_overwrite']
    target_feature = data["target_feature"]
    num_records = int(data['num_records_per_round'])
    wait_time = data['wait_time']
    server_address = server_ip + ':' + server_port
    return server_address, model_name, problem_type, data_location, model_params, model_version, selected_feature, target_feature, train_size, num_records, wait_time, model_overwrite

""" Main aion federated learning client function call. """
if __name__ == "__main__":
    ## Client random id gen.
    rand_id = randclientid(9, "ABC1234567890")
    client_id = 'flclient-' + str(rand_id)
    try:
        json_file = sys.argv[1]
    except Exception:
        log(INFO, "Please provide input configuration file. example: <python.exe 'fedclient\\aionflc.py' 'fedclient\\config.json'> \n")
        sys.exit(1)
    server_address, model_name, problem_type, data_location, model_params, model_version, selected_feature, target_feature, train_size, num_records, wait_time, model_overwrite = dataLoad(json_file)
    file_name = model_name + '_' + model_version + ".log"
    cwd = os.path.abspath(os.path.dirname(__file__))
    log_location = os.path.join(cwd, 'logs')
    try:
        os.makedirs(log_location)
    except FileExistsError:
        # log_location already exists
        pass
    try:
        logobj = logging.getLogger('AION')
        fl_log = os.path.normpath(os.path.join(log_location, file_name))
        log(INFO, "flclient log file path: %s ", str(fl_log))
        logging.basicConfig(filename=fl_log, format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)
    except Exception as e:
        log(INFO, "logging error. Error Msg: %s ", str(e))
        pass
    ## default data location ~data\inputfile.csv
    data_location = os.path.normpath(os.path.join(cwd, data_location))
    df = pd.read_csv(data_location)
    df = df[~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)]
    df = df.reset_index(drop=True)
    y = df[target_feature]
    X = df[selected_feature]
    input_shape = X.shape[1]
    output_shape = len(y.value_counts())
    test_size = (100 - train_size) / 100
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
    no_classes = len(df.groupby(target_feature).count())
    no_features = len(selected_feature)
    ## Pass the train data.
    (X_train, y_train) = utils.partition(X_train, y_train, 1)[0]
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_test_scaled = scaler.transform(X_test)
    y_train_dl = pd.get_dummies(y_train, sparse=True)
    y_test_dl = pd.get_dummies(y_test, sparse=True)
    if problem_type.lower() == "classification":
        if model_name.lower() == "logisticregression":
            logger.info("no_classes: " + str(no_classes))
            logger.info("no_features: " + str(no_features))
            model = LogisticRegression(**model_params, warm_start=True)
            try:
                status = utils.setmodelName(model_name)
                utils.set_initial_params(model, no_classes, no_features)
            except Exception as e:
                print("util error: \n", e)
            num_rounds = round(len(df) / num_records)
            log(INFO, "Federated learning Client connecting to Server @: %s ", str(server_address))
            try:
                flower.client.start_numpy_client(server_address=server_address, client=aionflc(model, num_rounds, model_name, model_version, wait_time, client_id, num_records, model_overwrite, problem_type, X_train_scaled, X_test_scaled, y_train, y_test))
            except Exception as e:
                logger.info("AION FL Client instance error: \n" + str(e))
            log(INFO, "AION federated learning Client %s execution completed.", str(client_id))
        elif model_name.lower() == "deeplearning":
            optimizer = model_params["optimizer"]
            loss_func = model_params["losses"]
            act_func = model_params["activation"]
            last_act_func = model_params["last_activation"]
            print(f"input_shape:{input_shape}, output_shape:{output_shape}.")
            model = None
            if output_shape == 2:
                if last_act_func == "sigmoid" and loss_func == "binary_crossentropy":
                    model = dl_model.dl_binary_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func)
                elif last_act_func == "softmax" and loss_func == "categorical_crossentropy":
                    model = dl_model.dl_binary_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func)
            else:
                model = dl_model.dl_multiClass_classification(input_shape, output_shape, optimizer, loss_func, act_func, last_act_func)
            print(model.summary())
            num_rounds = round(len(df) / num_records)
            log(INFO, "Federated learning Client connecting to Server @: %s ", str(server_address))
            try:
                flower.client.start_numpy_client(server_address=server_address, client=aionflc_dl(model, num_rounds, model_name, model_version, wait_time, client_id, num_records, model_overwrite, problem_type, X_train_scaled, X_test_scaled, y_train_dl, y_test_dl, model_params))
            except Exception as e:
                logger.info("AION FL Client instance error: \n" + str(e))
            log(INFO, "AION federated learning Client %s execution completed.", str(client_id))
            logger.info("AION federated learning Client execution completed." + str(client_id))
    elif problem_type.lower() == "regression":
        if model_name.lower() == "linearregression":
            # the config stores booleans as strings; convert before building the estimator
            model_params['fit_intercept'] = model_params['fit_intercept'] == 'True'
            model_params['copy_X'] = model_params['copy_X'] == 'True'
            model_params['positive'] = model_params['positive'] == 'True'
            model = LinearRegression(**model_params)
            status = utils.setmodelName(model_name)
            utils.set_initial_params_reg(model, X_train.shape[0], X_train.shape[1])
            num_rounds = round(len(df) / num_records)
            log(INFO, "Federated learning Client connecting to Server @: %s ", str(server_address))
            try:
                flower.client.start_numpy_client(server_address=server_address, client=aionflc(model, num_rounds, model_name, model_version, wait_time, client_id, num_records, model_overwrite, problem_type, X_train_scaled, X_test_scaled, y_train, y_test))
            except Exception as e:
                logger.info("AION FL Client instance error: \n" + str(e))
            log(INFO, "AION federated learning Client %s execution completed.", str(client_id))
        elif model_name.lower() == "deeplearning":
            optimizer = model_params["optimizer"]
            loss_func = model_params["losses"]
            act_func = model_params["activation"]
            model = dl_model.dl_regression_model(input_shape, 1, optimizer, loss_func, act_func)
            num_rounds = round(len(df) / num_records)
            log(INFO, "Federated learning Client connecting to Server @: %s ", str(server_address))
            try:
                flower.client.start_numpy_client(server_address=server_address, client=aionflc_dl(model, num_rounds, model_name, model_version, wait_time, client_id, num_records, model_overwrite, problem_type, X_train_scaled, X_test_scaled, y_train, y_test, model_params))
            except Exception as e:
                logger.info("AION FL Client instance error: \n" + str(e))
            log(INFO, "AION federated learning Client %s execution completed.", str(client_id))
utils.py
from typing import Tuple, Union, List
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from flwr.common.logger import log
from logging import INFO

XY = Tuple[np.ndarray, np.ndarray]
Dataset = Tuple[XY, XY]
LogRegParams = Union[XY, Tuple[np.ndarray]]
XYList = List[XY]
modelUsed = None
modelname = None

def setmodelName(modelselected):
    try:
        modelname = str(modelselected)
        print("setmodelName, given modelname: \n", modelname)
        if modelname.lower() == 'logisticregression':
            modelUsed = LogisticRegression()
            return True
        elif modelname.lower() == "linearregression":
            modelUsed = LinearRegression()
            return True
        elif modelname.lower() == "sgdclassifier":
            modelUsed = SGDClassifier()
            return True
        elif modelname.lower() == "knn":
            modelUsed = KNeighborsClassifier()
            return True
        elif modelname.lower() == "decisiontreeclassifier":
            modelUsed = DecisionTreeClassifier()
            return True
        else:
            return False
    except Exception as e:
        log(INFO, "set fl model name fn issue: %s", e)

def get_model_parameters(model) -> LogRegParams:
    """Returns the parameters of a sklearn LogisticRegression model."""
    if model.fit_intercept:
        params = (model.coef_, model.intercept_)
    else:
        params = (model.coef_,)
    return params

def set_model_params(model, params: LogRegParams):
    """Sets the parameters of a sklearn LogisticRegression model."""
    model.coef_ = params[0]
    try:
        if model.fit_intercept:
            model.intercept_ = params[1]
    except Exception as e:
        log(INFO, "set_model_params fn issue: %s", e)
    return model

def set_initial_params_reg(model, no_vals, no_features):
    """Sets initial parameters as zeros.

    Required since model params are uninitialized until model.fit is called,
    but the server asks for initial parameters from clients at launch. Refer to
    the sklearn.linear_model.LogisticRegression documentation for more information.
    """
    n_features = no_features
    model.coef_ = np.zeros(n_features,)
    try:
        if model.fit_intercept:
            model.intercept_ = np.zeros((no_vals,))
    except Exception as e:
        log(INFO, "set_initial_params fn issue: %s", e)

def set_initial_params(model, no_classes, no_features):
    """Sets initial parameters as zeros.

    Required since model params are uninitialized until model.fit is called,
    but the server asks for initial parameters from clients at launch. Refer to
    the sklearn.linear_model.LogisticRegression documentation for more information.
    """
    n_classes = no_classes
    n_features = no_features
    model.classes_ = np.array([i for i in range(n_classes)])
    model.coef_ = np.zeros((n_classes, n_features))
    try:
        if model.fit_intercept:
            model.intercept_ = np.zeros((n_classes,))
    except Exception as e:
        log(INFO, "set_initial_params fn issue: %s", e)

def shuffle(X: np.ndarray, y: np.ndarray) -> XY:
    """Shuffle X and y."""
    rng = np.random.default_rng()
    idx = rng.permutation(len(X))
    return X[idx], y[idx]

def partition(X: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList:
    """Split X and y into a number of partitions."""
    return list(
        zip(np.array_split(X, num_partitions), np.array_split(y, num_partitions))
    )
parameters.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import numpy as np
import logging
import sys
import os

class parametersDefine():
    def __init__(self):
        self.paramDict = None
        self.log = logging.getLogger('eion')

    def getParamSpaceSize(self, paramDict):
        size = 1
        if len(paramDict) == 0:
            return 0
        for keys in paramDict.keys():
            size = size * len(paramDict[keys])
        return size

    def paramDefine(self, paramSpace, method):
        paramDict = {}
        for j in list(paramSpace.keys()):
            inp = paramSpace[j]
            try:
                isLog = False
                isLin = False
                isRan = False
                isList = False
                isString = False
                try:
                    # check if functions are given as input and reassign paramspace
                    v = paramSpace[j]
                    if 'logspace' in paramSpace[j]:
                        paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
                        isLog = True
                    elif 'linspace' in paramSpace[j]:
                        paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
                        isLin = True
                    elif 'range' in paramSpace[j]:
                        paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
                        isRan = True
                    elif 'list' in paramSpace[j]:
                        paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
                        isList = True
                    elif '[' in paramSpace[j] and ']' in paramSpace[j]:
                        paramSpace[j] = v.split('[')[1].split(']')[0].replace(" ", "")
                        isList = True
                    x = paramSpace[j].split(',')
                except Exception:
                    if isinstance(paramSpace[j], (int, float)):
                        paramSpace[j] = str(paramSpace[j])
                    x = []
                    x.append(paramSpace[j])
                str_arg = paramSpace[j]
                # check if arguments are string
                try:
                    test = eval(x[0])
                except:
                    isString = True
                if isString:
                    paramDict.update({j: hp.choice(j, x)} if method == 'bayesopt' else {j: x})
                else:
                    res = eval(str_arg)
                    if isLin:
                        y = eval('np.linspace' + str(res))
                        paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))} if method == 'bayesopt' else {j: y})
                    elif isLog:
                        y = eval('np.logspace' + str(res))
                        paramDict.update({j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))} if method == 'bayesopt' else {j: y})
                    elif isRan:
                        y = eval('np.arange' + str(res))
                        paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y})
                    # check datatype of argument
                    elif isinstance(eval(x[0]), bool):
                        y = list(map(lambda i: eval(i), x))
                        paramDict.update({j: hp.choice(j, eval(str(y)))} if method == 'bayesopt' else {j: y})
                    elif isinstance(eval(x[0]), float):
                        res = eval(str_arg)
                        if len(str_arg.split(',')) == 3 and not isList:
                            y = eval('np.linspace' + str(res))
                            paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))} if method == 'bayesopt' else {j: y})
                        else:
                            y = list(res) if isinstance(res, tuple) else [res]
                            paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y})
                    else:
                        res = eval(str_arg)
                        if len(str_arg.split(',')) == 3 and not isList:
                            y = eval('np.linspace' + str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange' + str(res))
                        else:
                            y = list(res) if isinstance(res, tuple) else [res]
                        paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y})
            except Exception as inst:
                self.log.info('\n-----> Parameter parsing failed!!!.' + str(inst))
                self.log.info("The entered parameter is invalid: {" + str(j) + ':' + str(inp) + '}')
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
                self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
                raise
        return paramDict
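paramDefine() accepts strings such as 'range(...)', 'linspace(...)', 'logspace(...)', bracketed lists, or plain literals, and returns either raw value grids or hyperopt expressions. A small sketch; the method string 'grid' here stands for any non-'bayesopt' value:

from parameters import parametersDefine

pdef = parametersDefine()
space = {"max_depth": "range(2,10,2)", "criterion": "['gini','entropy']"}
grid = pdef.paramDefine(space, method='grid')  # {'max_depth': array([2,4,6,8]), 'criterion': ['gini','entropy']}
print(pdef.getParamSpaceSize(grid))            # 8 candidate combinations
bayes = pdef.paramDefine({"max_depth": "range(2,10,2)"}, method='bayesopt')  # hp.choice expression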
machinelearning.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import warnings warnings.filterwarnings('ignore') import logging import sklearn from sklearn.neighbors import NearestNeighbors from sklearn.cluster import KMeans from sklearn.cluster import DBSCAN from random import sample from numpy.random import uniform import numpy as np import math import pickle import os from math import isnan from sklearn.preprocessing import binarize from sklearn.preprocessing import LabelEncoder from sklearn.metrics import davies_bouldin_score from utils.file_ops import save_csv_compressed from sklearn.metrics import silhouette_score try: from sklearn.metrics import calinski_harabasz_score as calinski_harabaz_score except: from sklearn.metrics import calinski_harabaz_score import pandas as pd from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.metrics import roc_curve, auc from sklearn.metrics import roc_auc_score from sklearn.metrics import matthews_corrcoef from sklearn.metrics import brier_score_loss from sklearn.preprocessing import LabelBinarizer from sklearn.model_selection import train_test_split from sklearn.decomposition import LatentDirichletAllocation from learner.classificationModel import ClassifierModel from learner.regressionModel import RegressionModel from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error,make_scorer from sklearn.metrics import mean_squared_error from sklearn.metrics import RocCurveDisplay, auc, roc_curve import matplotlib.pyplot as plt #print("1") #from alibi.explainers import ALE,plot_ale #pd.set_option('display.max_columns', 10) #pd.set_option('display.width', None) def get_prediction( model, loaded_model, xtrain, xtest=None): train_prob = None test_prob = None predictedData = [] if xtest.empty: is_xtest = False else: is_xtest = True if model.lower() == 'lda': if is_xtest: predictedData = loaded_model.transform(xtest).argmax(axis=1) trainPredictedData = loaded_model.transform(xtrain) elif model.lower() == 'dbscan': if is_xtest: predictedData = loaded_model.fit_predict(xtest) predictedData = loaded_model.labels_ trainPredictedData = loaded_model.fit_predict(xtrain) trainPredictedData = loaded_model.labels_ elif model == 'Neural Architecture Search': train_prob = estimator.predict(xtrain) if train_prob.shape[1] == 1: train_prob = np.hstack(( 1-train_prob, train_prob)) trainPredictedData = np.argmax(train_prob, axis=1) if is_xtest: test_prob = estimator.predict(xtest) if test_prob.shape[1] == 1: test_prob = np.hstack(( 1-test_prob, test_prob)) predictedData = np.argmax(test_prob, axis=1) elif model in ['Deep Q Network','Dueling Deep Q Network']: from tf_agents.trajectories import time_step from tensorflow import constant q, _ = loaded_model(np.array(xtrain), step_type=constant([time_step.StepType.FIRST] * np.array(xtrain).shape[0]), training=False) train_prob = q.numpy() if train_prob.shape[1] == 1: train_prob = np.hstack(( 1-train_prob, train_prob)) trainPredictedData = np.argmax(train_prob, axis=1) predictedData = 
        if is_xtest:
            q,_ = loaded_model(np.array(xtest), step_type=constant([time_step.StepType.FIRST] * np.array(xtest).shape[0]), training=False)
            test_prob = q.numpy()
            if test_prob.shape[1] == 1:
                test_prob = np.hstack(( 1-test_prob, test_prob))
            predictedData = np.argmax(test_prob, axis=1)
    else:
        if is_xtest:
            predictedData = loaded_model.predict(xtest)
        trainPredictedData = loaded_model.predict(xtrain)
        if hasattr(loaded_model, 'predict_proba'):
            train_prob = loaded_model.predict_proba(xtrain)
            if is_xtest:
                test_prob = loaded_model.predict_proba(xtest)
    return trainPredictedData, predictedData, train_prob, test_prob

class machinelearning(object):

    def __init__(self):
        self.features=[]
        self.log = logging.getLogger('eion')
        self.plots = []

    def cluster_tendency(self,featureData):
        # Hopkins statistic: compare nearest-neighbour distances of uniform random points (ujd) against sampled data points (wjd)
        self.log.info("\n------------- Cluster Tendency Check -------------")
        d = featureData.shape[1]
        n = len(featureData)
        m = int(0.1 * n)
        nbrs = NearestNeighbors(n_neighbors=1).fit(featureData.values)
        rand_X = sample(range(0, n, 1), m)
        ujd = []
        wjd = []
        for j in range(0, m):
            u_dist, _ = nbrs.kneighbors(uniform(np.amin(featureData,axis=0),np.amax(featureData,axis=0),d).reshape(1, -1), 2, return_distance=True)
            ujd.append(u_dist[0][1])
            if isinstance(featureData.iloc[rand_X[j]].values, pd.core.arrays.sparse.array.SparseArray):
                featureData_reshaped = np.asarray(featureData.iloc[rand_X[j]].values).reshape(1, -1)
            else:
                featureData_reshaped = featureData.iloc[rand_X[j]].values.reshape(1, -1)
            w_dist, _ = nbrs.kneighbors(featureData_reshaped, 2, return_distance=True)
            wjd.append(w_dist[0][1])
        try:
            clusterTendency = sum(ujd) / (sum(ujd) + sum(wjd))
        except:
            clusterTendency = 0
        if isnan(clusterTendency):
            clusterTendency = 0
        self.log.info("-------> Cluster Tendency value using Hopkins Statistic: "+str(clusterTendency))
        self.log.info("------------- Cluster Tendency Check End-------------\n")
        return (clusterTendency)

    def calculateNumberofCluster(self,featureData):
        self.log.info("\n------------- Calculate Number of Cluster -------------")
        Sum_of_squared_distances = []
        K = range(1,15)
        for k in K:
            km = KMeans(n_clusters=k)
            km = km.fit(featureData)
            Sum_of_squared_distances.append(km.inertia_)
        x1, y1 = 1, Sum_of_squared_distances[0]
        x2, y2 = 15, Sum_of_squared_distances[len(Sum_of_squared_distances)-1]
        distances = []
        for inertia in range(len(Sum_of_squared_distances)):
            # Point-to-chord distance; see the illustrative knee_point() sketch at the end of this module
            x0 = inertia+2
            y0 = Sum_of_squared_distances[inertia]
            numerator = abs((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)
            denominator = math.sqrt((y2 - y1)**2 + (x2 - x1)**2)
            distances.append(numerator/denominator)
        n_clusters=distances.index(max(distances)) + 2
        self.log.info("-------> n_clusters: "+str(n_clusters-1))
        self.log.info("------------- Calculate Number of Cluster End-------------\n")
        return(n_clusters-1)

    def getclusterMatrix(self,featureData,targetData):
        silhouetteAvg = silhouette_score(featureData,targetData)
        self.log.info("-------> SilHouette_Avg: "+str(silhouetteAvg))
        daviesBouldinScore=davies_bouldin_score(featureData, targetData)
        self.log.info("-------> DaviesBouldinScore: "+str(daviesBouldinScore))
        calinskiHarabazScore=calinski_harabaz_score(featureData,targetData)
        self.log.info("-------> CalinskiHarabazScore: "+str(calinskiHarabazScore))
        matrix = '"SilHouette_Avg":'+str(silhouetteAvg)+',"DaviesBouldinScore":'+str(daviesBouldinScore)+',"CalinskiHarabazScore":'+str(calinskiHarabazScore)
        return(matrix)

    def get_regression_matrix(self,targetData,predictedData):
        try:
            r2score=r2_score(targetData, predictedData)
            self.log.info('-------> R2_score :'+str(r2score))
        except
Exception as e: self.log.info('\n--------- r2_score ',str(e)) r2score = 0 try: meanabsoluteerror=(mean_absolute_error(targetData, predictedData)) self.log.info('-------> MAE :'+str(meanabsoluteerror)) except Exception as e: self.log.info('\n---------Error: meanabsoluteerror ',str(e)) meanabsoluteerror = 0 try: meanssquatederror=mean_squared_error(targetData, predictedData) self.log.info('-------> MSE :'+str(meanssquatederror)) except Exception as e: self.log.info('\n---------Error: meanssquatederror ',str(e)) meanssquatederror = 0 try: rootmeanssquatederror=mean_squared_error(targetData, predictedData,squared=False) self.log.info('-------> RMSE :'+str(rootmeanssquatederror)) except Exception as e: self.log.info('\n---------Error: rootmeanssquatederror ',str(e)) rootmeanssquatederror = 0 try: normalised_rmse_percentage = (rootmeanssquatederror/ ( np.max(targetData) - np.min(targetData) )) * 100 self.log.info('-------> Normalised RMSE percentage :'+str(normalised_rmse_percentage)) except Exception as e: self.log.info('\n---------Error: Normalised RMSE percentage ',str(e)) normalised_rmse_percentage = -1 try: targetArray, predictedArray = np.array(targetData), np.array(predictedData) try: EPSILON = 1e-10 meanpercentageerror=np.mean(np.abs((targetArray - predictedArray) / (targetArray+EPSILON)))*100 except ZeroDivisionError: meanpercentageerror = 0 self.log.info('-------> MAPE :'+str(meanpercentageerror)) except Exception as e: self.log.info('\n---------Error: meanpercentageerror ',str(e)) meanpercentageerror = 0 matrix = '"MAE":'+str(round(meanabsoluteerror,2))+',"R2Score":'+str(round(r2score,2))+',"MSE":'+str(round(meanssquatederror,2))+',"MAPE":'+str(round(meanpercentageerror,2))+',"RMSE":'+str(round(rootmeanssquatederror,2))+',"Normalised RMSE(%)":'+str(round(normalised_rmse_percentage,2)) return matrix def getClassificationPerformaceMatrix(self,le_trainY,predictedData,prob,labelMaps): setOfyTrue = set(le_trainY) unqClassLst = list(setOfyTrue) if len(unqClassLst) <= 20: if str(labelMaps) != '{}': inv_mapping_dict = {v: k for k, v in labelMaps.items()} unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict) unqClassLst2 = list(unqClassLst2) else: unqClassLst2 = unqClassLst indexName = [] columnName = [] targetnames=[] for item in unqClassLst2: indexName.append("act:"+str(item)) columnName.append("pre:"+str(item)) targetnames.append(str(item)) matrixconfusion = pd.DataFrame(confusion_matrix(le_trainY,predictedData, labels = unqClassLst),index = indexName, columns = columnName) pd.set_option('display.max_columns',len(targetnames)+2) self.log.info('-------> Confusion Matrix: ') self.log.info(matrixconfusion) pd.reset_option('display.max_columns') classificationreport = pd.DataFrame(classification_report(le_trainY, predictedData, labels = unqClassLst,target_names=targetnames,output_dict=True)).transpose() self.log.info('-------> Classification Report: ') self.log.info(classificationreport) matrixconfusion = matrixconfusion.to_json(orient='index') classificationreport = classificationreport.to_json(orient='index') else: #bugid: 14540 self.log.info('-------> As the number of class is more than 20, skipping the creation of confusion_matrix and classification Report') return "" lb = LabelBinarizer() lb.fit(le_trainY) transformTarget= lb.transform(le_trainY) if transformTarget.shape[-1] == 1: transformTarget = le_trainY prob = np.delete( prob, 0, 1) rocaucscore = roc_auc_score(transformTarget,prob,average="macro") brier_score = None mcc_score = matthews_corrcoef(le_trainY,predictedData) if 
len(unqClassLst) > 2: brier_score = np.mean(np.sum(np.square(prob - transformTarget), axis=1)) else: brier_score = brier_score_loss(transformTarget,prob) self.log.info('-------> ROC AUC SCORE :'+str(rocaucscore)) self.log.info(f'-------> Matthews correlation coefficient SCORE : {mcc_score}') self.log.info(f'-------> BRIER SCORE : {brier_score}') matrix = f'"ConfusionMatrix": {matrixconfusion},"ClassificationReport": {classificationreport},"ROC_AUC_SCORE": {rocaucscore},"MCC_SCORE": {mcc_score},"BRIER_SCORE": {brier_score}' return(matrix) def split_into_train_test_data(self,featureData,targetData,testPercentage,modelType='classification'): ''' if cvSplit == None: ''' self.log.info('\n-------------- Test Train Split ----------------') if testPercentage == 0: xtrain=featureData ytrain=targetData xtest=featureData ytest=targetData else: testSize=testPercentage/100 if modelType == 'regression': self.log.info('-------> Split Type: Random Split') xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True) else: try: self.log.info('-------> Split Type: Stratify Split') xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,stratify=targetData,test_size=testSize,shuffle=True) except: self.log.info('-------> Split Type: Random Split') xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True) self.log.info('Status:- !... Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test') self.log.info('-------> Train Data Shape: '+str(xtrain.shape)+' ---------->') self.log.info('-------> Test Data Shape: '+str(xtest.shape)+' ---------->') self.log.info('-------------- Test Train Split End ----------------\n') ''' else: xtrain=featureData ytrain=targetData xtest=featureData ytest=targetData ''' return(xtrain,ytrain,xtest,ytest) def checkForClassBalancing(self,targetData): imbalancedCount=0 valueCount=targetData.value_counts() self.log.info("---------- Checking for Class Imbalance on Train Data---------") self.log.info("-------> Categories and Count:") self.log.info(valueCount) categoryList=valueCount.keys().tolist() categoryCountList=valueCount.tolist() for i in range(0,len(categoryCountList)): if float(categoryCountList[i])<=float(0.5*max(categoryCountList)): self.log.info("-------> Found Imbalanced class: '"+str(categoryList[i])+"' Count: "+str(categoryCountList[i])) imbalancedCount=imbalancedCount+1 if imbalancedCount == 0: self.log.info("-------> Status: Balanced") self.log.info('Status:- |... Check for Data balancing done: Balanced') else: self.log.info("-------> Status: Unbalanced") self.log.info('Status:- |... Check for Data balancing done: Unbalanced') self.log.info("---------- Checking for Class Imbalance on Train Data End---------") return(imbalancedCount) def ExecuteClassBalancing(self,featureData,targetData,balancingMethod): from imblearn.over_sampling import SMOTE from imblearn.under_sampling import TomekLinks from collections import Counter self.log.info('\n------------ Balancing Start --------------') if balancingMethod.lower() == "oversample": self.log.info("-------> Method: SMOTE OverSampling Technique") k=1 seed=100 try: oversample = SMOTE(sampling_strategy='auto', k_neighbors=k, random_state=seed) balfeatureData, baltargetData = oversample.fit_resample(featureData, targetData) self.log.info(baltargetData.value_counts()) except Exception as inst: self.log.info("\n!!!!!!!!! 
OverSampling Fails "+str(inst)+" !!!!!!!!!!!!!!\n") balfeatureData = featureData baltargetData = targetData elif balancingMethod.lower() == "undersample": self.log.info("-------> Method: Tomelinks UnderSampling Technique") tLinks = TomekLinks() balfeatureData, baltargetData= tLinks.fit_resample(featureData, targetData) #Added for checking balancing act by the algorithm. counter = Counter(baltargetData) self.log.info("Class counter:\t"+str(baltargetData.value_counts())) max_class = max(counter,key=counter.get) max_value = max(counter.values()) self.log.info("Max samples: "+str(max_value)+ " in the class: "+str(max_class)) for k,v in counter.items(): if v < (max_value*98/100): self.log.info("Undersampling is not able to do perfect data balancing.") self.log.info("The method is used to identify the desired samples of data from the majority class that is having the lowest Euclidean distance with the minority class data. Downsampling may not balance the class after applying this method.\n") self.log.info(baltargetData.value_counts()) else: balfeatureData = featureData baltargetData = targetData self.log.info("-------> Method: Balancing Not Applied") self.log.info('-------> Memory Usage by Training DataFrame After Class Balancing '+str(featureData.memory_usage(deep=True).sum())) self.log.info('Status:- |... Data balancing done: '+str(balancingMethod)) self.log.info('------------ Balancing End --------------\n') return(balfeatureData,baltargetData) def combine_text_features(self,dataFrame,dataColumns): column_merge_flag = False merge_columns = [] if(len(dataColumns) > 1): dataFrame['combined'] = dataFrame[dataColumns].apply(lambda row: ' '.join(row.values.astype(str)), axis=1) merge_columns = dataColumns features = ['combined'] column_merge_flag = True self.log.info("After Text Concatenation") self.log.info(dataFrame['combined'].head(10)) self.log.info("List of Combined Columns ---> "+ str(dataColumns) +"\n") else: features = dataColumns return(dataFrame,features,column_merge_flag,merge_columns) ''' def create_preprocessing_pipeline(self,X): textDataProfilerObj=textDataProfiler() tfidfVector = TfidfVectorizer(tokenizer = textDataProfilerObj.textTokenizer) pipe = Pipeline([("cleaner", TextCleaner()),('vectorizer', tfidfVector)]) vectors=pipe.fit(X) transformedVector=pipe.transform(X) return(pipe,transformedVector) ''' def get_topics(self, model, feature_names, no_top_words): topicDict = {} for topic_idx, topic in enumerate(model.components_): wordDict = {} topicProb = [(feature_names[i],topic[i]/topic.sum()) for i in topic.argsort()[:-no_top_words - 1:-1]] for word, prob in topicProb: if word.endswith('_vect'): word = word[:-len('_vect')] wordDict[word] = prob topicDict[ topic_idx] = wordDict return topicDict def transform_target_feature(self,dataFrame,targetColumn): targetDataType=dataFrame[targetColumn].dtypes pandasNumericDtypes=['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] labelMapping= {} if targetDataType not in pandasNumericDtypes: le = LabelEncoder() le.fit(dataFrame[targetColumn]) le_trainY = le.transform(dataFrame[targetColumn]) labelMapping = dict(zip(le.classes_, le.transform(le.classes_))) self.log.info(" \n encoded Values of predicator column ===>"+str(labelMapping)) else: le_trainY = dataFrame[targetColumn] return le_trainY,labelMapping def setScoreParams(self,scoreParam,modelType,categoryCountList): if modelType == 'classification' or modelType == 'TextClassification': allowedmatrix = ['accuracy','recall','f1_score','precision','roc_auc'] if(scoreParam.lower() not in 
allowedmatrix): scoreParam = 'accuracy' elif scoreParam.lower() == 'none': scoreParam = 'accuracy' elif scoreParam.lower() == "recall": if len(categoryCountList) > 2: scoreParam = make_scorer(sklearn.metrics.recall_score, average = 'weighted') else: scoreParam = make_scorer(sklearn.metrics.recall_score) elif scoreParam.lower() == "precision" : if len(categoryCountList) > 2: scoreParam = make_scorer(sklearn.metrics.precision_score, average = 'weighted') else: scoreParam = make_scorer(sklearn.metrics.precision_score) elif scoreParam.lower() == "f1_score" : if len(categoryCountList) > 2: scoreParam = make_scorer(sklearn.metrics.f1_score, average = 'weighted') else: scoreParam = make_scorer(sklearn.metrics.f1_score) elif scoreParam.lower() == "roc_auc" : if len(categoryCountList) > 2: scoreParam = make_scorer(sklearn.metrics.roc_auc_score,needs_proba=True,multi_class='ovr',average='weighted') else: scoreParam = make_scorer(sklearn.metrics.roc_auc_score) else: scoreParam = scoreParam else: allowedmatrix = ['mse','r2','rmse','mae'] if(scoreParam.lower() not in allowedmatrix): scoreParam = 'neg_mean_squared_error' elif scoreParam.lower() == 'none': scoreParam = 'neg_mean_squared_error' elif scoreParam.lower() == 'mse': scoreParam = 'neg_mean_squared_error' elif scoreParam.lower() == 'rmse': #scoreParam = make_scorer(sklearn.metrics.mean_squared_error, squared = False) scoreParam='neg_root_mean_squared_error' elif scoreParam.lower() == 'mae': scoreParam = 'neg_mean_absolute_error' elif scoreParam.lower() == 'r2': scoreParam = 'r2' else: scoreParam = scoreParam #self.log.info('Status:- !... Scoring parameters selected') self.log.info("-------> Scoring parameter: "+str(scoreParam)) return(scoreParam) def getbestfeatureModel(self,modelType,scoreParam,score1,score2,model1,model2,threshold1,pscore1,rscore1,threshold2,pscore2,rscore2,featuresset1,featureset2): best_feature_model = featuresset1 self.log.info('\n ---------- ML Summary ------------') if modelType.lower() == "classification": if(threshold1 == -1 and threshold2 == -1): if score1> score2: self.log.info('-------> Best Features: '+str(featuresset1)) self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featuresset1 else: self.log.info('-------> Best Features:'+str(featureset2)) self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featureset2 elif(threshold1 == -1): self.log.info('-------> Best Features: '+str(featureset2)) self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featureset2 elif(threshold1 == -2): self.log.info('-------> Best Features: '+str(featuresset1)) self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model =featuresset1 else: if pscore1 == pscore2: if rscore1 > rscore2: self.log.info('-------> Best Features: '+str(featuresset1)) self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featuresset1 else: self.log.info('-------> Best Features: '+str(featureset2)) self.log.info('-------> Best Model: 
'+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featureset2 elif rscore1 == rscore2: if pscore1 > pscore2: self.log.info('-------> Best Features: '+str(featuresset1)) self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featuresset1 else: self.log.info('-------> Best Features: '+str(featureset2)) self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featureset2 elif modelType.lower() == "regression": if scoreParam == "r2" or scoreParam == "explained_variance": if score1> score2 : self.log.info('-------> Best Features: '+str(featuresset1)) self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featuresset1 else: self.log.info('-------> Best Features: '+str(featureset2)) self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featureset2 else: if score1< score2 : self.log.info('-------> Best Features: '+str(featuresset1)) self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featuresset1 else: self.log.info('-------> Best Features: '+str(featureset2)) self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = featureset2 self.log.info('---------- ML Summary End ------------\n') return(best_feature_model) def startLearning(self,mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,topFeatures,modelFeatures,allFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps,featuresBasedOn,code_configure,featureEngineeringSelector,modelEvaluationConfig,imageFolderLocation): model = 'None' params = 'None' score = 0xFFFF estimator = None model_tried = '' threshold = -1 pscore = -1 rscore = -1 topics = {} if(targetColumn != ''): targetData = dataFrame[targetColumn] datacolumns=list(dataFrame.columns) if targetColumn in datacolumns: datacolumns.remove(targetColumn) if(modelType != 'clustering') and (modelType != 'TopicModelling'): scoreParam = self.setScoreParams(scoreParam,modelType,categoryCountList) if len(topFeatures) > 0: self.log.info('\n-------------- Training ML: Top/StatisticalBased Features Start --------------') modelbasedon = 'StatisticalBased' if featureEngineeringSelector.lower() == 'true': self.log.info('Status:- |... Algorithm analysis based on feature engineering based feature selection started') modelbasedon = 'DimensionalityReduction' else: self.log.info('Status:- |... 
Algorithm analysis based on statistical based feature selection started') model_type1,model1,params1, score1, estimator1,model_tried1,xtrain1,ytrain1,xtest1,ytest1,threshold1,pscore1,rscore1,method,topics=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,topFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, modelbasedon,code_configure,modelEvaluationConfig) if model_tried != '': model_tried += ',' model_tried += model_tried1 topFeaturesStatus = True if featureEngineeringSelector.lower() == 'true': self.log.info('Status:- |... Algorithm analysis based on feature engineering based feature selection completed') else: self.log.info('Status:- |... Algorithm analysis for statistical based feature completed') self.log.info('-------------- Training ML: Top/StatisticalBased Features End --------------\n') else: topFeaturesStatus = False if len(modelFeatures) > 0: self.log.info('\n-------------- Training ML: Models Based Selected Features Start --------------') self.log.info('Status:- |... Algorithm analysis based on model based feature selection started') model_type2,model2,params2, score2, estimator2,model_tried2,xtrain2,ytrain2,xtest2,ytest2,threshold2,pscore2,rscore2,method,topics=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,modelFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, "ModelBased",code_configure,modelEvaluationConfig) #model_tried2['Features'] = 'ModelBased' if model_tried != '': model_tried += ',' model_tried += model_tried2 modelFeaturesStatus = True self.log.info('Status:- |... Algorithm analysis for model based selected features completed') self.log.info('-------------- Training ML: Models Based Selected Features End --------------\n') else: modelFeaturesStatus = False if len(allFeatures) > 0: self.log.info('Status:- |... Algorithm analysis based on all features Start') model_type3,model3,params3, score3, estimator3,model_tried3,xtrain3,ytrain3,xtest3,ytest3,threshold3,pscore3,rscore3,method,topics=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,allFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, "AllFeatures",code_configure,modelEvaluationConfig) #model_tried3['Features'] = 'AllFeatures' allFeaturesStatus = True if model_tried != '': model_tried += ',' model_tried += model_tried3 self.log.info('Status:- |... 
Algorithm analysis based all features completed') else: allFeaturesStatus = False #print(topFeaturesStatus,modelFeaturesStatus,allFeaturesStatus) if topFeaturesStatus: if modelFeaturesStatus: best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score1,score2,model1,model2,threshold1,pscore1,rscore1,threshold2,pscore2,rscore2,'StatisticalBased','ModelBased') if best_feature_model == 'StatisticalBased' and allFeaturesStatus: best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score1,score3,model1,model3,threshold1,pscore1,rscore1,threshold3,pscore3,rscore3,'StatisticalBased','AllFeatures') if best_feature_model == 'ModelBased' and allFeaturesStatus: best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score2,score3,model2,model3,threshold2,pscore2,rscore2,threshold3,pscore3,rscore3,'ModelBased','AllFeatures') elif allFeaturesStatus: best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score1,score3,model1,model3,threshold1,pscore1,rscore1,threshold3,pscore3,rscore3,'StatisticalBased','AllFeatures') else: best_feature_model = 'StatisticalBased' if featureEngineeringSelector.lower() == 'true': best_feature_model = 'DimensionalityReduction' else: if modelFeaturesStatus and allFeaturesStatus: best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score2,score3,model2,model3,threshold2,pscore2,rscore2,threshold3,pscore3,rscore3,'ModelBased','AllFeatures') elif modelFeaturesStatus: best_feature_model = 'ModelBased' elif allFeaturesStatus: best_feature_model = 'AllFeatures' if (best_feature_model == 'StatisticalBased' or best_feature_model == 'DimensionalityReduction'): model_type = model_type1 model = model1 params = params1 score = score1 estimator = estimator1 #model_tried = model_tried1 xtrain = xtrain1 ytrain = ytrain1 xtest = xtest1 ytest = ytest1 features = topFeatures threshold = threshold1 pscore = pscore1 rscore = rscore1 elif (best_feature_model == 'AllFeatures'): model_type = model_type3 model = model3 params = params3 score = score3 estimator = estimator3 #model_tried = model_tried3 xtrain = xtrain3 ytrain = ytrain3 xtest = xtest3 ytest = ytest3 features = allFeatures threshold = threshold3 pscore = pscore3 rscore = rscore3 else: model_type = model_type2 model = model2 params = params2 score = score2 estimator = estimator2 #model_tried = model_tried2 xtrain = xtrain2 ytrain = ytrain2 xtest = xtest2 ytest = ytest2 threshold = threshold2 pscore = pscore2 rscore = rscore2 features = modelFeatures if score != 'NA': self.log.info('Status:- |... 
Final Best Algorithm selected: '+model+' having score='+str(round(score,2))+' based on '+best_feature_model+' feature selection') filename = os.path.join(deployLocation,'model',iterName+'_'+iterVersion+'.sav') saved_model = iterName+'_'+iterVersion+'.sav' if model == 'Neural Architecture Search': loaded_model = estimator try: estimator.save(filename, save_format="tf") except Exception: filename = os.path.join(deployLocation,'model','autoKerasModel.h5') estimator.save(filename) saved_model = 'autoKerasModel.h5' else: pickle.dump(estimator, open(filename, 'wb')) loaded_model = pickle.load(open(filename, 'rb')) if not xtest.empty: df_test = xtest.copy() else: df_test = xtrain.copy() if threshold == -1: if model.lower() == 'lda': predictedData = loaded_model.transform(xtest).argmax(axis=1) trainPredictedData = loaded_model.transform(xtrain) elif model.lower() == 'dbscan': predictedData = loaded_model.fit_predict(xtest) predictedData = loaded_model.labels_ trainPredictedData = loaded_model.fit_predict(xtrain) trainPredictedData = loaded_model.labels_ elif model == 'Neural Architecture Search': test_prob = estimator.predict(xtest) train_prob = estimator.predict(xtrain) if train_prob.shape[1] == 1: train_prob = np.hstack(( 1-train_prob, train_prob)) test_prob = np.hstack(( 1-test_prob, test_prob)) predictedData = np.argmax(test_prob, axis=1) trainPredictedData = np.argmax(train_prob, axis=1) elif model in ['Deep Q Network','Dueling Deep Q Network']: from tf_agents.trajectories import time_step from tensorflow import constant from sklearn.preprocessing import MinMaxScaler q, _ = loaded_model(np.array(xtest), step_type=constant([time_step.StepType.FIRST] * np.array(xtest).shape[0]), training=False) test_prob = MinMaxScaler().fit_transform( q.numpy()) q, _ = loaded_model(np.array(xtrain), step_type=constant([time_step.StepType.FIRST] * np.array(xtrain).shape[0]), training=False) train_prob = MinMaxScaler().fit_transform( q.numpy()) predictedData = np.argmax(test_prob, axis=1) trainPredictedData = np.argmax(train_prob, axis=1) elif modelType == 'clustering': if not xtest.empty: predictedData = loaded_model.predict(xtest) trainPredictedData = loaded_model.predict(xtrain) else: if not xtest.empty: predictedData = loaded_model.predict(xtest) trainPredictedData = loaded_model.predict(xtrain) if hasattr(loaded_model, 'predict_proba'): train_prob = loaded_model.predict_proba(xtrain) if not xtest.empty: test_prob = loaded_model.predict_proba(xtest) else: self.log.info("-------> Threshold :"+str(threshold)) if not xtest.empty: #bug 12437 if 'predict_proba' in dir(loaded_model): test_prob = loaded_model.predict_proba(xtest) predictedData = binarize(test_prob[:,1].reshape(-1, 1),threshold=threshold) else: raise Exception('--------- Loaded model does not support predict_proba ---------\n') train_prob = loaded_model.predict_proba(xtrain) trainPredictedData = binarize(train_prob[:,1].reshape(-1, 1),threshold=threshold) matrix = '' try: if(model_type == 'Classification'): self.log.info('\n--------- Performance Matrix with Train Data ---------') train_matrix = self.getClassificationPerformaceMatrix(ytrain,trainPredictedData,train_prob,labelMaps) self.log.info('--------- Performance Matrix with Train Data End ---------\n') if not xtest.empty: self.log.info('\n--------- Performance Matrix with Test Data ---------') performancematrix = self.getClassificationPerformaceMatrix(ytest,predictedData,test_prob,labelMaps) df_test['actual'] = ytest df_test['predict'] = predictedData self.log.info('--------- Performance Matrix 
with Test Data End ---------\n') matrix = performancematrix if hasattr( loaded_model, 'predict_proba'): predictedData_fit = loaded_model.predict_proba(xtest) elif model == 'Neural Architecture Search': predictedData_fit = estimator.predict(xtest) elif model in ['Deep Q Network','Dueling Deep Q Network']: from tf_agents.trajectories import time_step from tensorflow import constant q, _ = loaded_model(np.array(xtest), step_type=constant([time_step.StepType.FIRST] * np.array(xtest).shape[0]), training=False) predictedData_fit = q.numpy() else: predictedData_fit = loaded_model.predict(xtest) if predictedData_fit.shape[1] == 1: predictedData_fit = np.hstack((1 - predictedData_fit, predictedData_fit)) self.auc_roccurve(ytest,predictedData_fit,labelMaps,imageFolderLocation) else: df_test['actual'] = ytrain df_test['predict'] = trainPredictedData elif(model_type == 'Regression'): self.log.info('\n--------- Performance Matrix with Train Data ---------') train_matrix = self.get_regression_matrix(ytrain, trainPredictedData) self.log.info('--------- Performance Matrix with Train Data End ---------\n') if not xtest.empty: self.log.info('\n--------- Performance Matrix with Test Data ---------') matrix = self.get_regression_matrix(ytest, predictedData) df_test['actual'] = ytest df_test['predict'] = predictedData self.log.info('--------- Performance Matrix with Test Data End ---------\n') else: df_test['actual'] = ytrain df_test['predict'] = trainPredictedData elif(model_type == 'Clustering'): self.log.info('\n--------- Performance Matrix with Train Data ---------') train_matrix = self.getclusterMatrix(xtrain,trainPredictedData) self.log.info('--------- Performance Matrix with Train Data End ---------\n') self.log.info('\n--------- Performance Matrix with Test Data ---------') performacematrix = self.getclusterMatrix(xtest,predictedData) df_test['predict'] = predictedData self.log.info('--------- Performance Matrix with Test Data End ---------\n') matrix = performacematrix elif(model_type.lower() == 'topicmodelling'): self.log.info('\n--------- Performance Matrix with Train Data ---------') train_matrix = "" self.log.info('--------- Performance Matrix with Train Data End ---------\n') self.log.info('\n--------- Performance Matrix with Test Data ---------') performacematrix = "" df_test['predict'] = predictedData self.log.info('--------- Performance Matrix with Test Data End ---------\n') matrix = performacematrix except Exception as Inst: self.log.info('--------- Error Performance Matrix ---------\n') self.log.info(str(Inst)) df_test['predict'] = predictedData matrix = "" train_matrix = "" self.log.info('--------- Performance Matrix with Test Data End ---------\n') save_csv_compressed(df_test, predicted_data_file, encoding='utf-8') return 'Success',model_type,model,saved_model,matrix,train_matrix,xtrain.shape,model_tried,score,filename,features,threshold,pscore,rscore,method,estimator,xtrain,ytrain,xtest,ytest,topics,params def auc_roccurve(self,y_true,y_score,classee,imageFolderLocation): from keras.utils import to_categorical from sklearn.preprocessing import label_binarize import re n_classes = len(classee) y_true = to_categorical(y_true,num_classes = n_classes) fpr ={} tpr={} roc_auc={} class_names = list(classee.keys()) typeofclass = list(classee.values()) n_class = len(typeofclass) for i in range(n_classes): fpr[i],tpr[i],_ = roc_curve(y_true[:,i], y_score[:,i]) roc_auc[i]= auc(fpr[i],tpr[i]) plt.figure() plt.plot(fpr[i],tpr[i],label=f'{class_names[i]} (AUC = {roc_auc[i]:.2f})') 
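# One figure per class: each loop iteration draws the one-vs-rest ROC curve for class i; the diagonal drawn next marks a random classifier (AUC = 0.5).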
plt.plot([0,1],[0,1], linestyle='--') plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.title(f'{class_names[i]} ROC Curve') plt.legend() img_location = os.path.join(imageFolderLocation,str(i)+'_roc.png') #15092 plt.savefig(img_location) def startLearnerModule(self,mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,topFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, modelFeatureBased,code_configure,modelEvaluationConfig): matrix = '' threshold = -1 pscore = -1 rscore = -1 method = mlconfig['optimizationMethod'] method = method.lower() geneticParam = '' topics = {} optimizationHyperParameter = mlconfig['optimizationHyperParameter'] cvSplit = optimizationHyperParameter['trainTestCVSplit'] nIter = int(optimizationHyperParameter['iterations']) if(method.lower() == 'genetic'): geneticParam = optimizationHyperParameter['geneticparams'] scoreParam = scoreParam if 'thresholdTunning' in mlconfig: thresholdTunning = mlconfig['thresholdTunning'] else: thresholdTunning = 'NA' if len(topFeatures) !=0: self.features=topFeatures else: datacolumns=list(xtrain.columns) if targetColumn in datacolumns: datacolumns.remove(targetColumn) self.features =datacolumns self.log.info(f'-------> Number of Features Used For Training the Model: {len(self.features)}') features_names = str(self.features) if len(features_names) > 500: features_names = ','.join(self.features[:2]) + ', ..... ,' + ','.join(self.features[-2:]) self.log.info(f'-------> Features Used For Training the Model: {features_names}') xtrain = xtrain[self.features] if not xtest.empty: xtest = xtest[self.features] if cvSplit == "": cvSplit =None else: cvSplit =int(cvSplit) if modelType == 'classification': model_type = "Classification" MakeFP0 = False MakeFN0 = False if(len(categoryCountList) == 2): self.log.info("\n -------------- Check for FP or FN -------------- ") self.log.info("-------> Binary Classification") if(thresholdTunning.lower() == 'fp0'): self.log.info("-------> Threshold Tuning: False Positive") MakeFP0 = True elif(thresholdTunning.lower() == 'fn0'): self.log.info("-------> Threshold Tuning: False Negative") MakeFN0 = True if MakeFP0 == False and MakeFN0 == False: self.log.info("-------> Threshold Tuning: Not Any") self.log.info("-------------- Check for FP or FN End-------------- \n") elif(len(categoryCountList) > 2): #bug 12438 self.log.info("\n -------------- Check for FP or FN -------------- ") self.log.info("-------> Multiclass Classification") if(thresholdTunning.lower() == 'fp0' or thresholdTunning.lower() == 'fn0'): self.log.info("-------> Threshold Tuning: Not supported") else: self.log.info("-------> Threshold Tuning: Not Any") self.log.info("-------------- Check for FP or FN End-------------- \n") objClf = ClassifierModel(modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,modelType,MakeFP0,MakeFN0,deployLocation) model, params, score, estimator,model_tried,threshold,pscore,rscore = objClf.classModelling( modelFeatureBased,code_configure) elif modelType == 'regression': model_type = "Regression" objClf = RegressionModel(modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,deployLocation) model,params,score,estimator,model_tried = objClf.regressionModelling(modelFeatureBased,code_configure) elif modelType =='clustering': model_type = 'Clustering' print(modelList) if 'KMeans' in modelList: clustendency 
= self.cluster_tendency(xtrain) model='KMeans' model_tried = '{"Model":"KMeans","Score":"NA"}' kmeanmodelparams=modelParams['KMeans'] n_clusters = kmeanmodelparams['n_clusters'] if n_clusters == None or n_clusters == 0 or n_clusters == '': n_clusters = self.calculateNumberofCluster(xtrain) kmeanmodelparams['n_clusters'] = n_clusters kmeans=KMeans(n_clusters=n_clusters) targetData=kmeans.fit_predict(xtrain) self.log.info('Status:- |... ML Algorithm applied: KMeans') self.log.info('\n------------ Centers Points Start------------') values = kmeans.cluster_centers_.squeeze() #print(values) centers = pd.DataFrame(kmeans.cluster_centers_,columns= xtrain.columns) filename = os.path.join(deployLocation,'centers.csv') centers.to_csv(filename) labels = kmeans.labels_ i=0 for value_row in values: j=0 self.log.info('------->Label: '+str(i)) for value in value_row: self.log.info('---------->Feature: "'+str(self.features[j])+'" Center Point: '+str(value)) j = j+1 i = i+1 self.log.info('------------ Centers Points Start------------\n') score='NA' scoreParam=None params=kmeanmodelparams estimator=kmeans if 'DBSCAN' in modelList: DBSCAN_ModelParams=modelParams['DBSCAN'] db = DBSCAN(eps=DBSCAN_ModelParams['eps'],min_samples = DBSCAN_ModelParams['min_samples']).fit(xtrain) #targetData=db.fit_predict(xtrain) self.log.info('Status:- |... ML Algorithm applied: DBSCAN') labels = db.labels_ n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) n_noise_ = list(labels).count(-1) self.log.info('------->Labels: '+str(labels)) self.log.info('------->No Of Cluster: '+str(n_clusters_)) self.log.info('------->No Of Noise Point: '+str(n_noise_)) score='NA' scoreParam=None params='' estimator=db model='DBSCAN' model_tried = '{"Model":"DBSCAN","Score":"NA"}' elif modelType == 'topicmodelling': model_type = 'TopicModelling' model='LDA' model_tried = '{"Model":"LDA","Score":"NA"}' LDAmodelparams=modelParams['LDA'] n_topics = LDAmodelparams['n_topics'] n_words_per_topic = LDAmodelparams['n_words_per_topic'] if n_topics == None or n_topics == 0 or n_topics == '': n_topics = 10 LDAmodelparams['n_topics'] = n_topics if n_words_per_topic == None or n_words_per_topic == 0 or n_words_per_topic == '': n_words_per_topic = 10 LDAmodelparams['n_words_per_topic'] = n_words_per_topic lda = LatentDirichletAllocation(n_components=n_topics,random_state=0) self.log.info('Status:- |... ML Algorithm applied: LDA') targetData=lda.fit_transform(xtrain) topics = self.get_topics(lda, topFeatures, n_words_per_topic) self.log.info(topics) score='NA' scoreParam=None params=LDAmodelparams estimator=lda return model_type,model,params, score, estimator,model_tried,xtrain,ytrain,xtest,ytest,threshold,pscore,rscore,method, topics
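# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): calculateNumberofCluster()
# above picks the elbow of the KMeans inertia curve by scoring each candidate k
# with the perpendicular distance from (k, inertia_k) to the chord joining the
# first and last points of the curve. The standalone helper below reproduces
# that geometry in isolation; the sample inertia values in the comment are made up.
import math

def knee_point(inertias, k_min=1):
    """Return the k whose (k, inertia) point lies farthest from the chord."""
    x1, y1 = k_min, inertias[0]
    x2, y2 = k_min + len(inertias) - 1, inertias[-1]
    denom = math.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2)
    best_k, best_d = k_min, -1.0
    for i, y0 in enumerate(inertias):
        x0 = k_min + i
        d = abs((y2 - y1) * x0 - (x2 - x1) * y0 + x2 * y1 - y2 * x1) / denom
        if d > best_d:
            best_k, best_d = x0, d
    return best_k

# Example: knee_point([1000, 400, 150, 120, 110, 105]) returns 3, the bend of the curve.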
anomalyDetectionAE.py
# -*- coding: utf-8 -*-
# Python library imports
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from tensorflow.keras.optimizers import Adam,SGD
# from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Dense, Dropout
from sklearn.model_selection import train_test_split
from tensorflow.keras.losses import MeanSquaredLogarithmicError
import os
import json
import keras
from keras.layers import Input, Dense
from keras.callbacks import ModelCheckpoint, EarlyStopping
from sklearn.cluster import DBSCAN
from sklearn.model_selection import RandomizedSearchCV
from sklearn import metrics
import traceback
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
import logging
import joblib
from sklearn import preprocessing
'''
Python engine: >= 3.8

Purpose of the anomalyDetectionAE class:
To find anomalies in the user data using an autoencoder. This file contains the
base class anomalyDetectionAE, which acts as the entry point; its
mainAnomalyDetectionfn() method decides which algorithm (autoencoder or DBSCAN)
is used, based on the user input.

Two different approaches are available to find anomalies:
1. LSTM approach (anomalyDetectionAE.aetsmodel_lstm() fn)
2. Autoencoder approach (AeDetector class)
By default the autoencoder approach is used (the user can select one in the
basic config file).

One limitation: time series data is handled only by the autoencoder/LSTM
algorithms. The DBSCAN algorithm does not support time series; if a time
series feature is received, it should be dropped. The AION data profiler
automatically removes the time feature for DBSCAN, but for the autoencoder the
time series is passed through.

Parameter information:
df: input dataframe from the aion base class
paramSpace: default parameters from the basic config (user config settings)
deployLocation: deployment location. Detailed anomaly data is stored in the ../target/output location.
Target: only for supervised problems.
anomalyMethod: algorithm to be used (autoEncoder or DBScan), received from paramSpace.
testSize: for supervised problems; for unsupervised problems the whole input data is passed.
datetimeFeature: datetime feature for the autoencoder.
mv_featurebased_ad_status: if <True>, univariate (per-feature) anomaly detection for the autoencoder is enabled, and anomalies are found for each feature selected by the user.
'''
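# -----------------------------------------------------------------------------
# Illustrative helper (added for documentation; nothing in the pipeline calls
# it). It condenses the detection rule used throughout this module: score each
# sample by its reconstruction MAE and flag scores outside mean +/- 2*std,
# mirroring find_threshold() and the min/max threshold handling further below.
def _sketch_mae_threshold(x, reconstruction):
    """Return (max_threshold, min_threshold, boolean anomaly mask) for `x`."""
    err = np.mean(np.abs(np.asarray(reconstruction) - np.asarray(x)), axis=1)
    hi = err.mean() + 2 * err.std()
    lo = err.mean() - 2 * err.std()
    return hi, lo, (err > hi) | (err < lo)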
## Base class for the autoencoder (encoder-decoder). The keras Model class is
## its base class; config params are received from the AION config file (GUI).
## The instance below uses the keras subclassing API to run the encoder and decoder.
class AeDetector(Model):
    def __init__(self,train,test,units,latent_units,activation):
        super(AeDetector, self).__init__()
        # Because this is anomaly detection, 'sigmoid' activation is used on the last layer for all problems
        last_layer_activation='sigmoid'
        self.encoder = tf.keras.Sequential([
            Dense(units, activation=activation),
            Dense((units/2), activation=activation),
            Dense(latent_units, activation=activation)
        ])
        self.decoder = tf.keras.Sequential([
            Dense((units/2), activation=activation),
            Dense(units, activation=activation),
            Dense(train.shape[1], activation=last_layer_activation)
        ])

    ## Using the keras subclassing API
    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

'''This aion_gettimegranularity class is used to retrieve the time pattern (granularity) of a given datetime feature.'''
class aion_gettimegranularity:
    cls_name="datetimeinformation"

    def __init__(self,dataframe, datetimefeature):
        self.df=dataframe
        self.datetimefeature=datetimefeature
        # self.log=logging.getLogger('AION')
        self.log = logging.getLogger('eion')
        self.log.info("To retrieve the granularity of the given datetime feature by aion.")

    def get_dfinfo(self,df):
        from io import StringIO
        buf = StringIO()
        df.info(buf=buf)
        #self.log.info(buf.getvalue())
        return buf.getvalue()

    ## Main time granularity finder
    def get_granularity(self):
        try:
            ##get local df
            df_t=self.df
            buf_info=self.get_dfinfo(df_t)
            self.log.info(buf_info)
            df_t.drop(df_t.filter(regex='Unname'),axis=1,inplace=True)
            try:
                df_t[self.datetimefeature] = pd.to_datetime(df_t[self.datetimefeature])
            except Exception as e:
                self.log.info("Datetime feature to python datetime format conversion error.\n"+str(e))
            df_t['time_diff']=df_t[self.datetimefeature].diff().shift(-1)
            datetime_mean=df_t['time_diff'].mean()
            totsec = datetime_mean.total_seconds()
            ## Dict variable to store datetime details. All date params initialized as False.
            status_time={"h":False,"m":False,"s":False, "us":False,"ns":False,"Y":False,"M":False,"D":False}
            if (datetime_mean.days == 0):
                if (totsec/3600 > 1):
                    ## hour
                    status_time['h']=True
                else:
                    if (totsec/60 >1):
                        ## minute
                        status_time['m']=True
                    else:
                        if (totsec <= 1e-06 and totsec > 1e-09):
                            ## microsecond
                            status_time['us']=True
                        elif (totsec<= 1e-09 and totsec >=1e-12):
                            ## nanosecond
                            status_time['ns']=True
                        else:
                            ## second
                            status_time['s']=True
            else:
                days=datetime_mean.days
                if (days/365>1):
                    ## year
                    status_time['Y']=True
                else:
                    if (days>30):
                        ## month
                        status_time['M']=True
                    else:
                        ## day
                        status_time['D']=True
            time_pattern=None
            for k,v in status_time.items():
                if (v == True):
                    time_pattern=k
            self.log.info("<----- DateTime feature pattern (year/month/day/hour/minute/second/millisecond/microsecond/nanosecond) is: \t"+str(time_pattern))
            try:
                try:
                    df_t[self.datetimefeature] = pd.to_datetime(df_t[self.datetimefeature])
                except Exception as e:
                    pass
                df_t['Time_diff'] = ((df_t[self.datetimefeature])).diff(-1).dt.floor('T').dt.total_seconds().div(60).abs()
                time_threshold=1
                df_t['anomalyType'] = (np.where((df_t['Time_diff'] != 1),"Point","Sequence"))
                df_t.drop("Time_diff",axis=1,inplace=True)
            except Exception as e:
                self.log.info("time_diff err message: "+str(e))
        except Exception as e:
            print("time_diff err message: ",str(e))
        return df_t

## AION anomaly detection autoencoder main class.
It receives input params from anomalyDetector class class anomalyDetectionAE: def __init__(self,df,paramSpace,deployLocation,target,anomalyMethod,testSize,datetimeFeature,mv_featurebased_ad_status): self.mc=None self.es=None #aion gui inputs self.df=df self.paramSpace=paramSpace self.deployLocation=deployLocation self.target=target self.anomalyMethod=anomalyMethod self.testSize=round(testSize,1) self.datetimeFeature=datetimeFeature self.log = logging.getLogger('eion') self.mv_featurebased_ad_status=mv_featurebased_ad_status """ Uncomment below for debug purpose. """ # self.log.info("anomalyDetectionAE constructor: df head: \n"+str(df.head())) # self.log.info("anomalyDetectionAE constructor: df type: \n"+str(type(df))) # self.log.info("anomalyDetectionAE constructor: df len: \n"+str(len(df))) # self.log.info("anomalyDetectionAE constructor: self.datetimeFeature: \n"+str((self.datetimeFeature))) def targetnumClass(self,data): count_classes = pd.value_counts(data, sort = True) num_of_classes= len(count_classes) return num_of_classes def configload(self): cwd = os.path.abspath(os.path.dirname(__file__)) file_name='config.json' try: config_file=os.path.normpath(os.path.join(cwd,'config',file_name)) except Exception as e: self.log.info("<---- config path error. Error Msg: ---->"+str(e)) with open(config_file, 'r') as file: data = json.load(file) datapath=str(data["data"]) target=str(data["target"]) anomaly_algorithm=str(data["anomalydetection_algorithm"]) ae_hyperparameter=data["autoEncoder"] dbscan_hyperparameter=data["DBScan"] return datapath,target,ae_hyperparameter,anomaly_algorithm,dbscan_hyperparameter ## model summary def summary(self,model): return model.summary() ##To load ae model def model_load(self, path): cwd = os.path.abspath(os.path.dirname(__file__)) file_name=path try: model_location=os.path.normpath(os.path.join(cwd,'model',file_name)) except Exception as e: self.log.info("<---- Model path error. Error Msg: ---->"+str(e)) loaded_model = joblib.load(model_location) return loaded_model ## Load dataset def dataload(self,datapath): cwd = os.path.abspath(os.path.dirname(__file__)) file_name=datapath try: data_file=os.path.normpath(os.path.join(cwd,'data',file_name)) except Exception as e: self.log.info("<---- data path error. Error Msg:: ---->"+str(e)) df = pd.read_csv(data_file) return df ## Create dataframe with time sequence data, if not time series, sequence length always 1. 
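# e.g. with time_steps=3, create_dataset below pairs window rows [0..2] with the label at row 3, rows [1..3] with the label at row 4, and so on.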
def create_dataset(self,X, y, time_steps=1): Xs, ys = [], [] for i in range(len(X) - time_steps): v = X.iloc[i:(i + time_steps)].values Xs.append(v) ys.append(y.iloc[i + time_steps]) return np.array(Xs), np.array(ys) ## model for time series based AE encoder, decoder fn def aetsmodel_lstm(self,n_dims, n_timesteps, n_bottleneck,units,activation,df): # inputs = Input(shape = (n_timesteps, n_dims)) inputs = Input(shape = (df.shape[1], df.shape[2])) e = keras.layers.LSTM(units, activation = activation, return_sequences = True)(inputs) ## code layer or compressed form of data produced by the autoencoder, bottleneck layer latent_space = keras.layers.LSTM(n_bottleneck, activation = activation, return_sequences = False, name = 'bottleneck_layer')(e) e = keras.layers.RepeatVector(n_timesteps)(latent_space) decoder = keras.layers.LSTM(n_bottleneck, activation = activation, return_sequences = True)(e) decoder = keras.layers.LSTM(units, activation = activation, return_sequences = True)(decoder) outputs = keras.layers.TimeDistributed(Dense(n_dims))(decoder) model = Model(inputs = inputs, outputs = outputs) return model ## adding some model checkpoints to ensure the best values will be saved and early stopping to prevent the model from running unnecessary. def callbacks(self, **kwargs): self.mc = ModelCheckpoint(filepath = kwargs.get("filename"), save_best_only = True, verbose = 0) self.es = EarlyStopping(monitor = kwargs.get("monitor"), patience = kwargs.get("patience")) return self.es,self.mc ##This below function create get_datetime class python file in target->scripts folder '''This aion_gettimegranularity class is used to retrive the time pattern (for getting time granularity) of given datetime feature.''' def create_datetime_pyfile(self): try: datetimepattern_code=r"""## import pandas as pd import numpy as np class aion_gettimegranularity: cls_name="datetimeinformation" def __init__(self,dataframe, datetimefeature): self.df=dataframe self.datetimefeature=datetimefeature def get_dfinfo(self,df): from io import StringIO buf = StringIO() df.info(buf=buf) #print(buf.getvalue()) return buf.getvalue() def get_granularity(self): try: ##get local df df_t=self.df buf_info=self.get_dfinfo(df_t) df_t.drop(df_t.filter(regex='Unname'),axis=1,inplace=True) try: df_t[self.datetimefeature] = pd.to_datetime(df_t[self.datetimefeature]) except Exception as e: pass # print("Datetime feature to python datetime format convertion error.\n",e) df_t['time_diff']=df_t[self.datetimefeature].diff().shift(-1) datetime_mean=df_t['time_diff'].mean() totsec = datetime_mean.total_seconds() ## Dict variable to store datetime details.Initialized all date param as False. 
status_time={"h":False,"m":False,"s":False,"us":False,"ns":False,"Y":False,"M":False,"D":False} if (datetime_mean.days == 0): if (totsec/3600 > 1): ## hour status_time['h']=True else: if (totsec/60 >1): ## minute status_time['m']=True else: if (totsec <= 1e-06 and totsec > 1e-09): ## microsecond status_time['us']=True elif (totsec<= 1e-09 and totsec >=1e-012): ## nanosecond status_time['ns']=True else: ## second status_time['s']=True else: days=datetime_mean.days if (days/365>1): ## year status_time['Y']=True else: if (days>30): ## month status_time['M']=True else: ## day status_time['D']=True time_pattern=None for k,v in status_time.items(): if (v == True): time_pattern=k #print("<----- DateTime feature pattern (year/month/day/hour/minute/second/millisecond/microsecond/nanosecond) is: \t",(time_pattern)) try: try: df_t[self.datetimefeature] = pd.to_datetime(df_t[self.datetimefeature]) except Exception as e: pass df_t['Time_diff'] = ((df_t[self.datetimefeature])).diff(-1).dt.floor('T').dt.total_seconds().div(60).abs() time_threshold=1 df_t['anomalyType'] = np.where((df_t['Time_diff'] != 1),"Point","Sequence") df_t.drop("Time_diff",axis=1,inplace=True) except Exception as e: print("time_diff err message: ",str(e)) except Exception as e: pass # print("get_granularity err msg: ",(e)) return df_t """ cwd=self.deployLocation file_name='aion_granularity'+'.py' try: data_file=os.path.normpath(os.path.join(cwd,'script',file_name)) with open(data_file,'w') as file: file.write(datetimepattern_code) except Exception as error: self.log.info("<---- datetimepattern_code write Error.: ---->"+str(error)) self.log.info("datetimepattern source code created at target folder...\n") except Exception as error: self.log.info("<---- datetimepattern_code function Error.: ---->"+str(error)) ## Simple mlp based autoencoder model, not used now. # def aetsmodel_lstm(self,X_train): # model = keras.Sequential() # # autoencoder encoder # model.add(keras.layers.LSTM( # units=64, # input_shape=(X_train.shape[1], X_train.shape[2]) # )) # model.add(keras.layers.Dropout(rate=0.2)) # model.add(keras.layers.RepeatVector(n=X_train.shape[1])) # # autoencoder decoder # model.add(keras.layers.LSTM(units=64, return_sequences=True)) # model.add(keras.layers.Dropout(rate=0.2)) # model.add( # keras.layers.TimeDistributed( # keras.layers.Dense(units=X_train.shape[2]) # ) # ) # return model ## To find optimal anomaly threshold value def find_threshold(self,model, x_train_scaled): reconstructions = model.predict(x_train_scaled) # provides losses of individual instances msle reconstruction_errors = tf.keras.losses.mae(reconstructions, x_train_scaled) # threshold for anomaly scores threshold = np.mean(reconstruction_errors.numpy())+ 2*np.std(reconstruction_errors.numpy()) return threshold ## compiling the model with adam optimizer and mean squared error loss def model_compile(self, model,lr, loss, opt): if opt == "adam": opt = Adam(learning_rate = lr) else: opt = SGD(learning_rate = lr) model.compile(loss = loss, optimizer = opt) ## save anomaly points in aion target folder def save_anomalyvalues(self,df,file_name): # cwd = os.path.abspath(os.path.dirname(__file__)) cwd=self.deployLocation file_name=file_name+'.csv' try: out_path=os.path.normpath(os.path.join(cwd,'output')) if not os.path.isdir(out_path): os.makedirs(out_path) data_file=os.path.normpath(os.path.join(cwd,'output',file_name)) except Exception as error: self.log.info("<---- autoencoder artifact_dir path. 
Error Msg: ---->"+str(error)) try: df.to_csv(data_file,index=False) except Exception as e: self.log.info("<---- Saving log data frame error. Error Msg: ---->"+str(e)) ## model summary def summary(self,model): return model.summary() ##Method to find subsequence and point anomalies aion_gettimegranularity def find_point_subsequence_anomalies(self,datetime_column,dataframe=None): try: dataframe.reset_index(level=0, inplace=True) try: dataframe[datetime_column] = pd.to_datetime(dataframe[datetime_column]) except Exception as e: self.log.info("Dataframe contains no datetime feature.Err.Msg: \n"+str(e)) pass try: ##Below commented part using normalize with time delta, find point anomalies.But not used,just for reference. ##get day to check difference #date_f = dataframe[datetime_column].dt.normalize() ##compare successive rows and identify group size #dataframe['anomaly_value'] = np.where(dataframe[datetime_column].groupby(date_f.ne(date_f.shift()).cumsum()).transform('size').gt(1),'subsequence_anomaly', 'Point_anomaly') ##Using get_timepattern method aion_gettimegranularity_obj=aion_gettimegranularity(dataframe,datetime_column) anomaly_info_df=aion_gettimegranularity_obj.get_granularity() except Exception as e: self.log.info("find_point_subsequence_anomalies,: aion_gettimegranularity err msg:: \n"+str(e)) self.log.info("find_point_subsequence_anomalies,: anomaly_info_df: \n"+str(anomaly_info_df)) except Exception as e: self.log.info("find_point_subsequence_anomalies,: err msg:: \n"+str(e)) return anomaly_info_df ## Auto encoder time series function call ## dataframe info() not working for py logging, so workaround we can get information in buffer and log it. def get_df_info(self,df): from io import StringIO buf = StringIO() df.info(buf=buf) #self.log.info(buf.getvalue()) return buf.getvalue() ## Method to detect time series based anomalies in user data. Using both lstm and dense based autoencoder approaches. def aionAEAnomalyTS(self,df,test_size_perc,target,time_steps,dropout,mv_unique_feature_ad): ae_hyperparameter=self.paramSpace anomaly_algorithm=self.anomalyMethod # test_size=float(self.testSize) test_size=0.0 # train_size=1-test_size train_size=1-test_size # train_size_perc=train_size*100 train_size=int(len(df) * train_size) try: timeseries_layers=ae_hyperparameter['timeseries_layers'] ## Here we are checking whether to use only LSTM layers for dnn or dense layers. Dense layers better for predicting point as well sequence anomalies in time series. if (timeseries_layers.lower() == 'lstm'): try: ## Need to get normalized data for threshold calculation. 
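# The z-score scaling below uses the mean/std of the whole series, an implicit stationarity assumption.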
data_mean=df.mean(axis=0) data_std=df.std(axis=0) data=(df-data_mean)/data_std # train, test = df[:train_size], df[train_size:] train, test = data[:train_size], data[train_size:] test=train test1=test ## Need to copy test data train_index=train.index test_index=test.index cols = df.columns # train, test = train_test_split(df, test_size=test_size,random_state=42) X_train, y_train = self.create_dataset( train, train, time_steps ) X_test, y_test = self.create_dataset( test, test, time_steps ) n_dims=X_train.shape[2] n_timesteps=X_train.shape[1] opt=ae_hyperparameter['optimizer'] loss_fn=ae_hyperparameter["loss"] epochs=int(ae_hyperparameter['epochs']) batch_size=int(ae_hyperparameter['batch_size']) learning_rate=float(ae_hyperparameter['learning_rate']) n_bottleneck=int(ae_hyperparameter['latentspace_size']) units=int(ae_hyperparameter['hidden_units']) activation=ae_hyperparameter['activation'] ##For task 20731 minimum_threshold_user = str(ae_hyperparameter['min_threshold']) maximum_threshold_user = str(ae_hyperparameter['max_threshold']) autoencoder=self.aetsmodel_lstm(n_dims, n_timesteps, n_bottleneck,units,activation,X_train) ##To save file # cwd = os.path.abspath(os.path.dirname(__file__)) cwd=self.deployLocation try: artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir')) if not os.path.isdir(artifact_dir): os.makedirs(artifact_dir) except Exception as e: self.log.info("<---- Autoencoder artifact_dir path error. Error Msg: ---->"+str(e)) #dl callback fn to get best loss fn, early stopping & model checkpoint call backs es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = "val_loss") self.model_compile(autoencoder,learning_rate, loss_fn, opt) X_train = np.reshape(X_train,(X_train.shape[0],X_train.shape[1],X_train.shape[2])) X_test = X_test.reshape((X_test.shape[0], X_test.shape[1],n_dims)) # y_test = y_test.reshape((y_test.shape[0], y_test.shape[1], n_dims)) model_hist = autoencoder.fit( X_train, X_train, epochs=epochs, batch_size=batch_size, validation_split=0.1, shuffle=False,callbacks = [mc, es] ) model_info=self.summary(autoencoder) X_train_pred = autoencoder.predict(X_train) train_mae_loss = np.mean(np.abs(X_train_pred - X_train), axis=1) ## Task 20731 if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = float(minimum_threshold_user) elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())): threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = float(minimum_threshold_user) elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) else: threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) # threshold = np.mean(train_mae_loss) + np.std(train_mae_loss) self.log.info("Anomaly threshold max value based on loss fn (MAE): "+str(threshold)) self.log.info("Anomaly threshold min value based on loss fn (MAE): "+str(min_threshold)) X_test_pred = autoencoder.predict(X_test) test_mae_loss = np.mean(np.abs(X_test_pred - X_test), axis=1) test_score_df = pd.DataFrame(index=test_index[time_steps:]) if (n_dims >1): columns = [f'loss_{num}' for num in range(n_dims)] # test_score_df = pd.DataFrame(test_mae_loss, columns=columns, index=test_index[time_steps:]) 
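# Multivariate case: the per-feature reconstruction MAE is averaged into a single loss per window before thresholding.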
test_score_df['loss'] = test_mae_loss.mean(axis=1)
                    else:
                        test_score_df['loss'] = test_mae_loss
                    test_score_df['max_threshold'] = threshold
                    test_score_df['min_threshold'] = min_threshold
                    ## A point is an anomaly when the loss is above the max threshold or below the min threshold.
                    ## (Previously two separate assignments overwrote each other, so only the min-threshold check applied; combined into one condition, matching the dense branch.)
                    test_score_df['anomaly_value'] = ((test_score_df.loss > test_score_df.max_threshold) | (test_score_df.loss < test_score_df.min_threshold))
                    ## Newly added for lstm issue
                    ## if the incoming dataframe has a datetime index, copy it before concat (differently indexed dfs)
                    import pandas.api.types as ptypes
                    test_cp_index = None
                    if (ptypes.is_datetime64_dtype(test_score_df.index) and ptypes.is_datetime64_dtype(df.index)):
                        test_cp_index = test_score_df.index
                        df_cp_index = df.index
                        test_score_df = test_score_df.reset_index()
                        df = df.reset_index()
                        ## self.datetimeFeature
                        test_score_df = test_score_df.dropna()
                        try:
                            test_score_df[self.datetimeFeature] = pd.to_datetime(test_score_df[self.datetimeFeature])
                            df[self.datetimeFeature] = pd.to_datetime(df[self.datetimeFeature])
                        except:
                            pass
                        try:
                            final_df = pd.DataFrame()
                            cols_to_use = df.columns.difference(test_score_df.columns)
                            final_df = pd.merge(test_score_df, df[cols_to_use], left_index=True, right_index=True, how='inner')
                        except Exception as e:
                            self.log.info("final_df creation err msg: \n"+str(e))
                    else:
                        test_index = test_score_df.reset_index(drop=True)
                        test_cp_index = test_index.index
                        df_index = df.reset_index(drop=True)
                        final_df = pd.DataFrame()
                        final_df = test_score_df.join(df)
                        final_df = final_df.dropna()
                    ## Again set the datetime index on the dataframes: drop the datetime feature column and set it as the index.
                    try:
                        final_df.set_index(self.datetimeFeature, inplace=True)
                        df.set_index(self.datetimeFeature, inplace=True)
                        df.drop(self.datetimeFeature, axis=1, inplace=True)
                        final_df.drop(self.datetimeFeature, axis=1, inplace=True)
                    except:
                        pass
                    ## Below commented code prints df.info() to the log file (using the get_df_info() method).
                    # self.log.info("anomaly final_df info: \n")
                    # buf_info=self.get_df_info(final_df)
                    # self.log.info(buf_info)
                    ## Restore the datetime index.
                    final_df.index = test_cp_index
                    normal_prediction_df = test_score_df.loc[test_score_df['anomaly_value'] == False]
                    anomaly_prediction_df = test_score_df.loc[test_score_df['anomaly_value'] == True]
                    ## Newly added for lstm issue
                    anomaly_prediction_df = pd.merge(anomaly_prediction_df, final_df, on=['loss', 'max_threshold', 'min_threshold', 'anomaly_value'], how="left")
                    # anomaly_prediction_df.fillna(anomaly_prediction_df.mean(), inplace=True)
                    anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace([np.inf, -np.inf], np.nan)
                    final_df['anomaly_value'] = final_df['anomaly_value'].replace([np.inf, -np.inf], np.nan)
                    anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace({True: 1, False: 0})
                    final_df['anomaly_value'] = final_df['anomaly_value'].replace({True: 1, False: 0})
                    # make sure no nan values remain after the dataframe operations (dropna returns a copy, so reassign)
                    anomaly_prediction_df = anomaly_prediction_df.dropna()
                    final_df = final_df.dropna()
                    # anomal_loss_threshold=anomaly_prediction_df  # use if we want to save loss and threshold as dataframe info.
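                    ## Reference only: the user-override threshold logic above (and repeated in
                    ## the dense and non-time-series branches) can be summarized by this hedged,
                    ## illustrative helper; the name and signature are not part of AION, just a
                    ## restatement of the same precedence rules.
                    def _resolve_thresholds_reference(train_mae_loss, min_user, max_user):
                        import numpy as np
                        # user values win when supplied; otherwise fall back to mean +/- 2*std of the training loss
                        max_thr = float(max_user) if (max_user and max_user.strip()) else np.mean(train_mae_loss) + 2 * np.std(train_mae_loss)
                        min_thr = float(min_user) if (min_user and min_user.strip()) else np.mean(train_mae_loss) - 2 * np.std(train_mae_loss)
                        return min_thr, max_thr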
self.log.info("Anomaly data with loss and threshold informations: \n"+str(anomaly_prediction_df)) """ Saving anomaly plots in target->output->anomaly_plot folder """ ## Goto if cond for multivariate whole dataset anomaly prediction, else goto else part for feature based ad prediction. if (mv_unique_feature_ad.lower()=='false'): for col in df.columns: df_subset = anomaly_prediction_df[col] fig, ax = plt.subplots() df[col].plot(legend=False, ax=ax) df_subset.plot(legend=False, ax=ax, color="r") plot_name=col ax.set_title(plot_name+"_Anomaly Data Plot") ax.set_xlabel("DateTime") ax.set_ylabel("Values") plot_name=plot_name+'_'+'anomalyplot.png' try: plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) if not os.path.isdir(plot_dir): os.makedirs(plot_dir) plotpath=str(plot_dir)+'/'+plot_name except Exception as e: self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e)) if os.path.exists(plotpath): os.remove(plotpath) plt.savefig(plotpath) # plt.savefig(str(plot_dir)+'/'+plot_name) plt.clf() plt.cla() plt.close() else: df_subset = anomaly_prediction_df fig, ax = plt.subplots() df.plot(legend=False, ax=ax) ax.set_title("Anomaly Data Plot") ax.set_xlabel("X values") ax.set_ylabel("Y Values") df_subset.plot(legend=False, ax=ax, color="r") plot_name=df.columns[0] ax.set_title(plot_name+"_Anomaly Data Plot") # ax.set_xlabel("DateTime") # ax.set_ylabel("Values") # plot_name=df.columns[0] plot_name=plot_name+'_'+'anomalyplot.png' try: plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) if not os.path.isdir(plot_dir): os.makedirs(plot_dir) plotpath=str(plot_dir)+'/'+plot_name except Exception as e: self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e)) if os.path.exists(plotpath): os.remove(plotpath) plt.savefig(plotpath) # plt.savefig(str(plot_dir)+'/'+plot_name) plt.clf() plt.cla() plt.close() #process dt feature and save anomalies. datetime_column=str(self.datetimeFeature) try: anomaly_prediction_df=self.find_point_subsequence_anomalies(datetime_column,anomaly_prediction_df) # normal_prediction_df=self.find_point_subsequence_anomalies(datetime_column,normal_prediction_df) except: ##If any issue in time series point anomaly detection, skip it. self.log.info("Detecting point anomalies have some issue,check datetime feature.") pass combined_df=pd.concat([anomaly_prediction_df,normal_prediction_df],ignore_index=True) combined_df['anomaly_value']=combined_df['anomaly_value'].fillna('Normal_Data') ## If categorical features in original df, then inverse transform the values. anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace({1: "Anomaly", 0: "Normal"}) final_df['anomaly_value'] = final_df['anomaly_value'].replace({1: "Anomaly", 0: "Normal"}) ##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature. if (mv_unique_feature_ad.lower()=='true'): ## Multivariate and saving individual feature based anomalies self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_ts_anomaly_dataframe')) # self.save_anomalyvalues(combined_df,(str(feature_name)+'_ts_overall_dataframe')) try: final_df=self.merge_pre_post_dfs(final_df) except Exception as e: self.log.info("Anomaly Detection Merge df exception:\n"+str(e)) #If merge fails, just out! 
pass self.save_anomalyvalues(final_df,(str(feature_name)+'_ts_overall_dataframe')) ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt')) ## Save actual test data test_score_df #self.save_anomalyvalues(test_score_df,(str(feature_name)+'_testdata')) else: self.save_anomalyvalues(anomaly_prediction_df,'ts_anomaly_dataframe') # self.save_anomalyvalues(combined_df,'ts_normal_anomaly_dataframe') try: final_df=self.merge_pre_post_dfs(final_df) except Exception as e: self.log.info("Anomaly Detection Merge df exception:\n"+str(e)) #If merge fails, just out! pass self.save_anomalyvalues(final_df,'ts_overall_dataframe') ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt')) ## Save actual test data test_score_df #self.save_anomalyvalues(test_score_df,'testdata') anomaly_info_df=final_df self.log.info("<---- Autoencoder time series data anomalies: ---->"+str(anomaly_prediction_df)) self.log.info("<---- Autoencoder time series:Number of anomalies in data:: ---->"+str(len(anomaly_prediction_df))) # return model except Exception as e: self.log.info("AD lstm traceback error: \n"+str(traceback.format_exc())) ## Dense layer based time series AD, most real world usecases, it is working best compared to lstm based.. elif (timeseries_layers.lower() == 'dense'): try: feature_name=df.columns feature_name = ' '.join(map(str, feature_name)) try: #Passing whole data,so test size set as zero. test_size=0.0 # train_size=1-test_size train_size=1-test_size # train_size_perc=train_size*100 train_size=int(len(df) * train_size) train_data,test_data = df[:train_size], df[train_size:] test_data=train_data except: #If any error comes,us sklearn train test split train_data,test_data = train_test_split(df,test_size=test_size,random_state=42) pass test_index=test_data.index ## to get datetime index units=int(ae_hyperparameter['hidden_units']) latent_units=int(ae_hyperparameter['latentspace_size']) activation=ae_hyperparameter['activation'] ##For task 20731 minimum_threshold_user = str(ae_hyperparameter['min_threshold']) maximum_threshold_user = str(ae_hyperparameter['max_threshold']) train_data=train_data.values test_data=test_data.values ## tss is time series flag, true or false autoencoder = AeDetector(train_data,test_data,units,latent_units,activation) opt=ae_hyperparameter['optimizer'] loss_fn=ae_hyperparameter["loss"] epochs=int(ae_hyperparameter['epochs']) batch_size=int(ae_hyperparameter['batch_size']) learning_rate=float(ae_hyperparameter['learning_rate']) cwd=self.deployLocation try: artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir')) if not os.path.isdir(artifact_dir): os.makedirs(artifact_dir) except Exception as e: self.log.info("<---- artifact_dir path error. 
Error Msg: ---->"+str(e)) es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = "val_loss") self.model_compile(autoencoder,learning_rate, loss_fn, opt) # autoencoder.compile(optimizer='adam', loss='mae') autoencoder.fit(train_data, train_data, epochs = epochs, batch_size=batch_size, validation_data=(test_data, test_data),callbacks = [mc, es]) # reconstructed = autoencoder(train_data) reconstructed = autoencoder.predict(train_data) train_mae_loss = tf.keras.losses.mae(reconstructed, train_data) ## Task 20731 if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = float(minimum_threshold_user) elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())): threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = float(minimum_threshold_user) elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) else: threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) # threshold = np.mean(train_mae_loss) + np.std(train_mae_loss) self.log.info("Anomaly threshold max value based on loss fn (MAE): "+str(threshold)) self.log.info("Anomaly threshold min value based on loss fn (MAE): "+str(min_threshold)) test_labels=None if (len(self.datetimeFeature) >= 1): time_series_data="True" else: time_series_data="False" pred,test_score_df,actual_data,anomaly_info_df = self.prediction(autoencoder, test_data,min_threshold, threshold,test_labels,time_series_data,time_steps,test_index) # normal_prediction_df=(anomaly_info_df[~anomaly_info_df['anomaly_value']]) normal_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==False] anomaly_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==True] #Below ts_dataframe_anomaly not for production, just for testing purpose. If uncommented, comment it. #self.save_anomalyvalues(anomaly_info_df,'ts_dataframe_normal') # anomal_loss_threshold=anomaly_prediction_df #use if we want to save loss and threshold as dataframe info. self.log.info("Anomaly data with loss and threshold informations: \n"+str(anomaly_prediction_df)) # anomaly_prediction_df_plot=anomaly_prediction_df """ Saving anomaly plots in target->output->anomaly_plot folder """ ## Only for multivariate (all features) based anomaly data plot ## Use of the below part if anomaly df columns came as numerical columns. # if not (df.columns.equals(anomaly_prediction_df.columns)): # num_cols = [] # try: # num_cols=[num_cols.append(float(col)) for col in anomaly_prediction_df.columns.values] # except ValueError: # pass # #Dense layer scaler conversion makes column names as int values, so here find the int cols and rename to original names. # if (num_cols): # anomaly_prediction_df=anomaly_prediction_df[num_cols] # anomaly_prediction_df.columns=df.columns # normal_prediction_df=normal_prediction_df[num_cols] # normal_prediction_df.columns=df.columns ## Goto if cond for multivariate whole dataset anomaly prediction, else goto else part for feature based ad prediction. 
if (mv_unique_feature_ad.lower()=='false'): # for col in df.columns: for col in actual_data.columns: df_subset = anomaly_prediction_df[col] fig, ax = plt.subplots() df[col].plot(legend=False, ax=ax) df_subset.plot(legend=False, ax=ax, color="r") plot_name=col ax.set_title(plot_name+"_Anomaly Data Plot") ax.set_xlabel("DateTime") ax.set_ylabel("Values") plot_name=plot_name+'_'+'anomalyplot.png' try: plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) if not os.path.isdir(plot_dir): os.makedirs(plot_dir) plotpath=str(plot_dir)+'/'+plot_name except Exception as e: self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e)) if os.path.exists(plotpath): os.remove(plotpath) plt.savefig(plotpath) # plt.savefig(str(plot_dir)+'/'+plot_name) plt.clf() plt.cla() plt.close() else: df_subset = anomaly_prediction_df fig, ax = plt.subplots() df.plot(legend=False, ax=ax) ax.set_title("Anomaly Data Plot") ax.set_xlabel("DateTime") ax.set_ylabel("Values") df_subset.plot(legend=False, ax=ax, color="r") plot_name=df.columns[0] ax.set_title(plot_name+"_Anomaly Data Plot") # ax.set_xlabel("DateTime") # ax.set_ylabel("Values") # plot_name=df.columns[0] plot_name=plot_name+'_'+'anomalyplot.png' try: plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) if not os.path.isdir(plot_dir): os.makedirs(plot_dir) plotpath=str(plot_dir)+'/'+plot_name except Exception as e: self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e)) if os.path.exists(plotpath): os.remove(plotpath) plt.savefig(plotpath) # plt.savefig(str(plot_dir)+'/'+plot_name) plt.clf() plt.cla() plt.close() datetime_column=str(self.datetimeFeature) # anomaly_prediction_df=self.find_point_subsequence_anomalies(datetime_column,anomaly_prediction_df) # normal_prediction_df=self.find_point_subsequence_anomalies(datetime_column,normal_prediction_df) try: anomaly_prediction_df=self.find_point_subsequence_anomalies(datetime_column,anomaly_prediction_df) # normal_prediction_df=self.find_point_subsequence_anomalies(datetime_column,normal_prediction_df) except: self.log.info("Detecting point anomalies have some issue,check datetime feature.") ##Just pass if datetime column provides issue, use without datetime column info pass combined_df=pd.concat([anomaly_prediction_df,normal_prediction_df],ignore_index=True) combined_df['anomaly_value']=combined_df['anomaly_value'].fillna('Normal_Data') ## If categorical features in original df, then inverse transform the values. try: # anomaly_info_df['anomaly_value']=anomaly_info_df['anomaly_value'].astype(str).replace(replace_values_F,'NormalDataPoint', regex=True) self.naming_anomalyvalues(anomaly_info_df) except Exception as e: self.log.info("anomaly_info_df exception err msg: \n"+str(e)) ##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature. if (mv_unique_feature_ad.lower()=='true'): ## Multivariate and saving individual feature based anomalies self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_ts_anomaly_dataframe')) try: anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) except Exception as e: #If merge fails, just out!. self.log.info("Anomaly Detection Merge df exception :\n"+str(e)) finally: #check merging done or not, to be imp. 
pass self.save_anomalyvalues(anomaly_info_df,(str(feature_name)+'_ts_overall_dataframe')) '''For overall ordered output,uncomment the below.''' # self.save_anomalyvalues(combined_df,(str(feature_name)+'_ts_overall_dataframe_ordered')) ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt') ## Save actual test data actual_data #self.save_anomalyvalues(actual_data,(str(feature_name)+'_testdata')) else: self.save_anomalyvalues(anomaly_prediction_df,'ts_anomaly_dataframe') try: anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) except Exception as e: #If merge fails, just out!. self.log.info("Anomaly Detection Merge df exception :\n"+str(e)) finally: #check merging done or not, to be imp. pass self.save_anomalyvalues(anomaly_info_df,'ts_overall_dataframe') #Ordered data # self.save_anomalyvalues(combined_df,'ts_overall_dataframe_ordered') ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt'), ## Save actual test data test_score_df #self.save_anomalyvalues(actual_data,'testdata') self.log.info("<---- Autoencoder time series anomalies : ---->"+str(anomaly_prediction_df)) self.log.info("<---- Autoencoder time series, Number of anomalies in data: ---->"+str(len(anomaly_prediction_df))) # self.save_anomalyvalues(anomaly_prediction_df,'ts_anomaly_dataframe') except Exception as e: self.log.info("dense layer anomaly error: \n"+str(traceback.format_exc())) else: self.log.info("Only LSTM and Dense layers supported for time series.") except Exception as e: self.log.info("<---- time series error msg: ---->"+str(e)) self.log.info("<---- time series error msg (detailed): ---->"+str(traceback.format_exc())) return autoencoder,anomaly_prediction_df,anomaly_info_df ## To normalize data,use when necessary def normalize_data(train_data,test_data): min_val=tf.reduce_min(train_data) max_val=tf.reduce_max(train_data) train_data = (train_data - min_val)/(max_val - min_val) test_data = (test_data - min_val)/(max_val - min_val) #converte the data into float train_data = tf.cast(train_data, dtype=tf.float32) test_data = tf.cast(test_data, dtype=tf.float32) return train_data,test_data ## Scaling data ,Not used because of our aion preprocessing data profiler option. use when necessary. 
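## Reference usage (illustrative, hedged): getScaledData below fits the scaler on the train
## split only and reuses it for test, which avoids train/test leakage. Assuming dataframes
## with a 'feature' column, usage would look like:
# train_df, test_df, scaler = getScaledData(method='standard', train_df=train_df, test_df=test_df, feature_col='feature')
# new_df['scaled_feature'] = scaler.transform(new_df[['feature']])  # reuse the fitted scaler at inference time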
def getScaledData(method='standard', train_df=None, test_df=None, feature_col='feature'): from sklearn.preprocessing import StandardScaler if method == 'standard': scaler = StandardScaler() else: scaler = MinMaxScaler() scaler = scaler.fit(train_df[[feature_col]]) train_df['scaled_'+feature_col] = scaler.transform(train_df[[feature_col]]) test_df['scaled_'+feature_col] = scaler.transform(test_df[[feature_col]]) return train_df, test_df, scaler ## prediction fn def prediction(self,model, data,min_threshold, threshold,test_labels,time_series_status,time_steps,test_index): # data1=scaler.inverse_transform(data) try: df_new=self.df.drop(self.datetimeFeature,axis=1,inplace=False) except: df_new=self.df try: actual_data=pd.DataFrame(self.df,columns=df_new.columns) except Exception as e: actual_data=pd.DataFrame(self.df) pass n_features=data.shape[1] self.log.info("prediction: number of features: \n"+str(n_features)) predicted_data = model.predict(data) loss = tf.keras.losses.mae(predicted_data, data) if (time_series_status.lower() == 'true'): test_score_df = pd.DataFrame(index=test_index) actual_data = actual_data.set_index(test_index) anomaly_info_df=pd.DataFrame() test_score_df['loss'] = loss test_score_df['max_threshold'] = threshold test_score_df['min_threshold'] = min_threshold ## Task 20731 #test_score_df['anomaly_value'] = test_score_df.apply(lambda x: x.loss > x.max_threshold or x.loss <= x.min_threshold, axis=1) test_score_df['anomaly_value'] = np.where((test_score_df["loss"] > test_score_df["max_threshold"]) | (test_score_df["loss"] <= test_score_df["min_threshold"]), True, False) anomaly_info_df = pd.concat([actual_data, test_score_df], axis=1) else: test_score_df = pd.DataFrame() anomaly_info_df=pd.DataFrame() test_score_df['loss'] = loss #test_score_df['threshold'] = threshold test_score_df['max_threshold'] = threshold test_score_df['min_threshold'] = min_threshold ## Task 20731 #test_score_df['anomaly_value'] = (test_score_df.loss >= test_score_df.max_threshold) #test_score_df['anomaly_value'] = (test_score_df.loss < test_score_df.min_threshold) test_score_df['anomaly_value'] = np.where((test_score_df["loss"] > test_score_df["max_threshold"]) | (test_score_df["loss"] <= test_score_df["min_threshold"]), True, False) anomaly_info_df = pd.concat([actual_data, test_score_df], axis=1) return tf.math.less(loss, threshold),test_score_df,actual_data,anomaly_info_df ##Not used now, for data ploting purpose # def plot(self,autoencoder,data, n): # enc_img = autoencoder.encoder(data) # dec_img = autoencoder.decoder(enc_img) # plt.plot(data[n], 'b') # plt.plot(dec_img[n], 'r') # plt.fill_between(np.arange(data.shape[1]), data[n], dec_img[n], color = 'lightcoral') # plt.legend(labels=['Input', 'Reconstruction', 'Error']) # plt.show() ## autoencoder fn for non timeseries data def ae_nontimeseriesmodelfn(self,df,target): autoencoder=None mv_unique_feature_ad=self.mv_featurebased_ad_status #For supervised non time series problems, we need to remove datetime feature. This will help scaler algs process the numeric data only. 
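## Reference sketch (assumption): self.callbacks() is defined elsewhere in this class and is
## not shown in this section; a typical Keras pairing matching its (filename, patience,
## monitor) call signature would be the following. Illustrative only, not the AION code.
def _callbacks_reference(filename, patience, monitor):
    import os
    import tensorflow as tf
    es = tf.keras.callbacks.EarlyStopping(monitor=monitor, patience=patience, restore_best_weights=True)
    mc = tf.keras.callbacks.ModelCheckpoint(os.path.join(filename, 'best_model'), monitor=monitor, save_best_only=True)
    return es, mc
## ae_nontimeseriesmodelfn body continues below.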
try: if (target == ''): try: test_size=0.0 # train_size=1-test_size train_size=1-test_size # train_size_perc=train_size*100 train_size=int(len(df) * train_size) train_data,test_data = df[:train_size], df[train_size:] test_data=train_data except: test_size=float(self.testSize) train_data,test_data = train_test_split(df,test_size=test_size,random_state=42) pass ae_hyperparameter=self.paramSpace units=int(ae_hyperparameter['hidden_units']) latent_units=int(ae_hyperparameter['latentspace_size']) activation=ae_hyperparameter['activation'] ##For task 20731 minimum_threshold_user = str(ae_hyperparameter['min_threshold']) maximum_threshold_user = str(ae_hyperparameter['max_threshold']) train_data=train_data.values test_data=test_data.values autoencoder = AeDetector(train_data,test_data,units,latent_units,activation) opt=ae_hyperparameter['optimizer'] loss_fn=ae_hyperparameter["loss"] # loss_fn='binary_crossentropy' epochs=int(ae_hyperparameter['epochs']) batch_size=int(ae_hyperparameter['batch_size']) learning_rate=float(ae_hyperparameter['learning_rate']) # autoencoder.save('../output/autoenc',save_format='tf') # cwd = os.path.abspath(os.path.dirname(__file__)) cwd=self.deployLocation try: artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir')) if not os.path.isdir(artifact_dir): os.makedirs(artifact_dir) except Exception as e: self.log.info("<---- artifact_dir path error. Error Msg: ---->"+str(e)) es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = "val_loss") # es,mc=self.callbacks(filename = "../output/autoenc.sav", patience = 3, monitor = "val_loss") self.model_compile(autoencoder,learning_rate, loss_fn, opt) # autoencoder.compile(optimizer='adam', loss='mae') autoencoder.fit(train_data, train_data, epochs = epochs, batch_size=batch_size, validation_data=(test_data, test_data),callbacks = [mc, es]) reconstructed = autoencoder(train_data) train_mae_loss = tf.keras.losses.mae(reconstructed, train_data) #threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) #min_threshold = np.mean(train_mae_loss)- 2*np.std(train_mae_loss) ## Task 20731 if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = float(minimum_threshold_user) elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())): threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = float(minimum_threshold_user) elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) else: threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) # threshold = np.mean(train_mae_loss) + np.std(train_mae_loss) self.log.info("Anomaly Upper Threshold value based on loss fn (MAE): "+str(threshold)) self.log.info("Anomaly lower_threshold value based on loss fn (MAE): "+str(min_threshold)) test_labels=None ## No test labels passed pred,test_score_df,actual_data,anomaly_info_df = self.prediction(autoencoder, test_data, min_threshold,threshold,test_labels,'False',None,None) # normal_prediction_df=(anomaly_info_df[~anomaly_info_df['anomaly_value']]) normal_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==False] anomaly_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==True] self.log.info("<---- Autoencoder 
(non timeseries) based anomaly detection, anomalies in data: ---->"+str(anomaly_prediction_df)) self.log.info("<---- Number of anomalies in data: ---->"+str(len(anomaly_prediction_df))) self.save_anomalyvalues(anomaly_prediction_df,'anomaly_dataframe') # combined_df=pd.concat([anomaly_prediction_df,normal_prediction_df],ignore_index=True) self.log.info("<---- Autoencoder (non timeseries) based anomaly detection, overall data (both anomaly and non-anomaly ) in data: ---->"+str(anomaly_info_df)) # self.save_anomalyvalues(combined_df,'overall_dataframe') ## If categorical features in original df, then inverse transform the values. try: ##anomaly_info_df,total dataframe. self.naming_anomalyvalues(anomaly_info_df) except Exception as e: self.log.info("anomaly_info_df exception err msg: \n"+str(e)) ##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature. if (mv_unique_feature_ad.lower()=='true'): ## Multivariate and saving individual feature based anomalies self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_anomaly_dataframe')) try: anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) except Exception as e: #If merge fails, just out!. self.log.info("Anomaly Detection Merge df exception :\n"+str(e)) finally: #check merging done or not, to be imp. pass self.save_anomalyvalues(anomaly_info_df,(str(feature_name)+'_overall_dataframe')) ## Save actual test data actual_data #self.save_anomalyvalues(actual_data,(str(feature_name)+'_testdata')) else: self.save_anomalyvalues(anomaly_prediction_df,'anomaly_dataframe') try: anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) except Exception as e: #If merge fails, just out!. self.log.info("Anomaly Detection Merge df exception :\n"+str(e)) finally: #check merging done or not, to be imp. pass self.save_anomalyvalues(anomaly_info_df,'overall_dataframe') #Ordered data # self.save_anomalyvalues(combined_df,'ts_overall_dataframe_ordered') ## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line # self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt'), ## Save actual test data test_score_df #self.save_anomalyvalues(actual_data,'testdata') self.log.info("<---- Autoencoder non time series / supervised problem anomalies : ---->"+str(anomaly_prediction_df)) #ploting df_subset = anomaly_prediction_df fig, ax = plt.subplots() df.plot(legend=False, ax=ax) df_subset.plot(legend=False, ax=ax, color="r") ax.set_title("Anomaly Data Plot") ax.set_xlabel("DateTime") ax.set_ylabel("Values") plot_name='anomalyplot.png' try: plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) if not os.path.isdir(plot_dir): os.makedirs(plot_dir) plotpath=str(plot_dir)+'/'+plot_name except Exception as e: self.log.info("<---- plot_dir path error. 
Error Msg: ---->"+str(e)) if os.path.exists(plotpath): os.remove(plotpath) plt.savefig(plotpath) # plt.savefig(str(plot_dir)+'/'+plot_name) plt.clf() plt.cla() plt.close() else: y=df[target] X=df.drop(target, axis=1) train_data,test_data,train_labels,test_labels=train_test_split(X,y,test_size=0.2,random_state=42) count_classes = pd.value_counts(df[target], sort = True) num_of_classes= len(count_classes) self.log.info("train_data info: \n"+str(train_data.info())) if (num_of_classes >= 2): # scaler = StandardScaler() # train_data = scaler.fit_transform(train_data) # test_data = scaler.fit_transform(test_data) # self.saveScaler(scaler) train_labels = train_labels.astype(bool) test_labels = test_labels.astype(bool) n_train_data = train_data[train_labels] n_test_data = test_data[test_labels] # data1=scaler.inverse_transform(n_test_data) n_test_data_actual=pd.DataFrame(n_test_data) ##anomaly data an_train_data = train_data[~train_labels] an_test_data = test_data[~test_labels] n_train_data = train_data[train_labels] n_test_data = test_data[test_labels] ae_hyperparameter=self.paramSpace # autoencoder = AeDetector(n_train_data,n_test_data) activation=ae_hyperparameter['activation'] units=int(ae_hyperparameter['hidden_units']) latent_units=int(ae_hyperparameter['latentspace_size']) ##For task 20731 minimum_threshold_user = str(ae_hyperparameter['min_threshold']) maximum_threshold_user = str(ae_hyperparameter['max_threshold']) autoencoder = AeDetector(n_train_data,n_test_data,units,latent_units,activation) opt=ae_hyperparameter['optimizer'] loss_fn=ae_hyperparameter["loss"] batch_size=int(ae_hyperparameter['batch_size']) # loss_fn='binary_crossentropy' epochs=int(ae_hyperparameter['epochs']) learning_rate=float(ae_hyperparameter['learning_rate']) cwd=self.deployLocation try: artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir')) if not os.path.isdir(artifact_dir): os.makedirs(artifact_dir) except Exception as e: self.log.info("<---- artifact_dir path error. 
Error Msg: ---->"+str(e)) es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = "val_loss") self.model_compile(autoencoder,learning_rate, loss_fn, opt) # autoencoder.compile(optimizer='adam', loss='mae') autoencoder.fit(n_train_data, n_train_data, epochs = epochs, batch_size=batch_size, validation_data=(n_test_data, n_test_data),callbacks = [mc, es]) model_info=self.summary(autoencoder) self.log.info("<---- Auto encoder anomaly detection model information: ---->"+str(model_info)) # reconstructed = autoencoder(n_train_data) reconstructed = autoencoder.predict(n_train_data) #threshold = self.find_threshold(autoencoder, n_train_data) train_mae_loss = tf.keras.losses.mae(reconstructed, n_train_data) pred=tf.math.less(train_mae_loss, threshold) ## Task 20731 if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = float(minimum_threshold_user) elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())): threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = float(minimum_threshold_user) elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())): threshold = float(maximum_threshold_user) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) else: threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss) min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss) self.log.info("Anomaly threshold max value based on loss fn (MAE): "+str(threshold)) self.log.info("Anomaly threshold min value based on loss fn (MAE): "+str(min_threshold)) pred,test_score_df,actual_data,anomaly_info_df = self.prediction(autoencoder, n_test_data, min_threshold,threshold,test_labels,'False',None,None) # normal_prediction_df=(anomaly_info_df[~anomaly_info_df['anomaly_value']]) normal_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==False] # normal_prediction_df.to_csv('normal_prediction_df.csv') # anomaly_prediction_df=(anomaly_info_df[anomaly_info_df['anomaly_value']]) anomaly_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==True] self.log.info("<---- Autoencoder (non timeseries) based anomaly detection, overall data (both anomaly and non-anomaly ) in data: ---->"+str(anomaly_info_df)) # self.save_anomalyvalues(combined_df,'overall_dataframe') ## If categorical features in original df, then inverse transform the values. try: ##anomaly_info_df,total dataframe. self.naming_anomalyvalues(anomaly_info_df) except Exception as e: self.log.info("anomaly_info_df exception err msg: \n"+str(e)) ##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature. if (mv_unique_feature_ad.lower()=='true'): ## Multivariate and saving individual feature based anomalies self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_anomaly_dataframe')) try: anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) except Exception as e: #If merge fails, just out!. self.log.info("Anomaly Detection Merge df exception :\n"+str(e)) finally: #check merging done or not, to be imp. 
pass self.save_anomalyvalues(anomaly_info_df,(str(feature_name)+'_overall_dataframe')) ## Save actual test data actual_data #self.save_anomalyvalues(actual_data,(str(feature_name)+'_testdata')) else: self.save_anomalyvalues(anomaly_prediction_df,'anomaly_dataframe') try: anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df) except Exception as e: #If merge fails, just out!. self.log.info("Anomaly Detection Merge df exception :\n"+str(e)) finally: #check merging done or not, to be imp. pass self.save_anomalyvalues(anomaly_info_df,'overall_dataframe') ## Save actual test data test_score_df #self.save_anomalyvalues(actual_data,'testdata') self.log.info("<----Number of anomalies in data: ---->"+str(len(anomaly_prediction_df))) """ Plot to show case anomalies, now commented, for testing purpose uncomment and check visually anomalies. """ #ploting df_subset = anomaly_prediction_df fig, ax = plt.subplots() df.plot(legend=False, ax=ax) df_subset.plot(legend=False, ax=ax, color="r") # plt.show() ax.set_title("Anomaly Data Plot") ax.set_xlabel("DateTime") ax.set_ylabel("Values") plot_name='anomalyplot.png' try: plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) if not os.path.isdir(plot_dir): os.makedirs(plot_dir) plotpath=str(plot_dir)+'/'+plot_name except Exception as e: self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e)) if os.path.exists(plotpath): os.remove(plotpath) plt.savefig(plotpath) # plt.savefig(str(plot_dir)+'/'+plot_name) plt.clf() plt.cla() plt.close() else: self.log.info("<---- Check dataset and basic configurations. ---->") except Exception as e: self.log.info("<---- Non time series anomaly detection error msg: ---->"+str(e)) self.log.info("<---- Non time series anomaly detection error msg (detailed): ---->"+str(traceback.format_exc())) return autoencoder,anomaly_prediction_df,anomaly_info_df ## Hyperparameter tuning autoencoders, not implemented def hyperparamtuning_AE(self): try: self.log.info ("autoencoder hyperparam tuning.not implemented.") except Exception as e: self.log.info("autoencoder hyperparam tuning error: "+str(e)) pass ## randomsearch for dbscan def hyperparamtuning_dbscan(self,model,tuner,Parameter_Trials,data): params=model.get_params().keys() try: labels = model.labels_ #df = pd.DataFrame(labels) try: scorer = metrics.silhouette_score(data, labels) except: pass if (tuner.lower() == 'randomsearch'): # Parameters to try cluster_labels = model.labels_ Random_Search = RandomizedSearchCV(model, Parameter_Trials, n_iter=50,cv=5, scoring='adjusted_rand_score', refit=True, n_jobs=1, verbose=5) RandomSearchResults=Random_Search.fit(data) # Fetching the best hyperparameters best_params=RandomSearchResults.best_params_ # All the parameter combinations tried by RandomizedSearchCV RandomSearchResults.cv_results_['params'] except Exception as e: self.log.info("<---- dbscan hpt error msg: ---->"+str(e)) self.log.info("<---- dbscan hpt error msg (detailed): ---->"+str(traceback.format_exc())) return best_params ## Reading aion postprocess data from target->AION_usecaseNo->data->postprocess data def read_inputdata(self): cwd=self.deployLocation try: in_path=os.path.normpath(os.path.join(cwd,'data')) if not os.path.isdir(in_path): self.log.info("<---- Anomaly detection target data folder not available.--->\n") postprocesseddata=os.path.normpath(os.path.join(cwd,'data','postprocesseddata.csv')) postprocessed_df=pd.read_csv(postprocesseddata) except Exception as e: self.log.info("<---- Anomaly detection target data folder not available, Reading 
postprocess csv file issue. Error Msg: ---->"+str(e)) return postprocessed_df ## Get original dataframe values using preprocess pipe after output data created. ##get_label_dict fn not used now. Use if preprocess_pipe based transform needed. def get_label_dict(self, pipe): label_dict = {} dict_pipe={} for (comp_name, component) in pipe.transformer_list: if 'labelencoding' in comp_name: i=1 for step in component.steps: key='val'+'_'+str(i) ordinalencoder=step[1] dict_pipe[f'val_{i}']=ordinalencoder # dict_pipe[key].append(ordinalencoder) label_dict.update(dict_pipe) i=i+1 return label_dict else: continue return label_dict ## Decode label features using aion preprocessed_pipe model,not used now. If we need to use preprocess pipe for inverse transform,use below block. def decoder_labeled_features(self,df): import joblib try: cwd=self.deployLocation # in_path=os.path.normpath(os.path.join(cwd,'data')) if not os.path.isdir(in_path): self.log.info("<---- Anomaly detection target model folder not available.--->\n") preprocessed_pipe=os.path.normpath(os.path.join(cwd,'model','preprocess_pipe.pkl')) model = joblib.load(preprocessed_pipe) label_dict = get_label_dict(model) encoder=label_dict.get('val_4') num_cols = orig_data.select_dtypes(include=np.number).columns.tolist() cat_cols = orig_data.select_dtypes(exclude=np.number).columns.tolist() cat_col_actual=[] for col in cat_cols: try: df1=encoder.inverse_transform(df[col]) cat_col_actual.append(col) except: pass df1=pd.DataFrame(data=df1) df1.columns=cat_cols df2=df[num_cols] df_anomalyinfo_col=df['anomaly_value'] df_list = [df2, df1, df_anomalyinfo_col] # List of your dataframes combined_df = pd.concat(df_list, join='outer', axis=1).fillna(0) except: combined_df=None pass return combined_df ## save predicted data and actual data columns. For get back user original data features # def merge_pre_post_dfs(self,out_df=None): cwd=self.deployLocation anomaly_algorithm=str(self.anomalyMethod) try: in_path=os.path.normpath(os.path.join(cwd,'data')) if not os.path.isdir(in_path): self.log.info("<---- Anomaly detection target data folder not available.--->\n") preprocessed_file=os.path.normpath(os.path.join(cwd,'data','preprocesseddata.csv')) preprocessed_df=pd.read_csv(preprocessed_file) ## cat_cols will get categorical col from preprocessed, cat_diff_cols will get common cat col between output df and preprocessed. cat_cols=preprocessed_df.select_dtypes(exclude=np.number).columns.tolist() num_cols = preprocessed_df.select_dtypes(include=np.number).columns.tolist() cat_diff_cols=list(set(cat_cols).intersection(out_df.columns.tolist())) diff_cols=list(set(preprocessed_df.columns).difference(out_df.columns)) if (cat_diff_cols): if (len(preprocessed_df) == len(out_df)): #Drop each categorical col of original data from output df (which have numerical converted values). 
So, in merging can be done on perfect columns try: ## get common categorical col name between actual and output df for col in cat_diff_cols : out_df.drop(col,axis=1,inplace=True) except: self.log.info("drop col not possible, pass the step.") #Just continue pass diff_cols=list(set(preprocessed_df.columns).difference(out_df.columns)) try: ## Check any datetime column in output df and preprocesseddata import pandas.api.types as ptypes outdf_dt_index_check=ptypes.is_datetime64_dtype(out_df.index) #Is output df have datetime col if (outdf_dt_index_check): if ((self.datetimeFeature.lower() !='na' and self.datetimeFeature)): try: preprocessed_df[self.datetimeFeature] = pd.to_datetime(preprocessed_df[self.datetimeFeature]) preprocessed_df.set_index(self.datetimeFeature, inplace=True) except Exception as e: self.log.info("Given data not contain datetime specified."+str(traceback.format_exc())) ## Below step ,making datetime index to date time column. for merging and droping purpose. preprocessed_df.reset_index(inplace=True) preprocessed_df.rename(columns={"index":self.datetimeFeature},inplace=True) out_df.reset_index(inplace=True) out_df.rename(columns={"index":self.datetimeFeature},inplace=True) else: ## If no datetime column, we need to keep both dataframe index columns as unique. so making them as int index. preprocessed_df.reset_index(inplace=True, drop=True) out_df.reset_index(inplace=True, drop=True) pass ## below part is to get status of index columns type (datetime,int or str), commented now. If needed for debug,pls use. # dt_index_check=ptypes.is_datetime64_dtype(out_df.index) # int_index_check=ptypes.is_numeric_dtype(out_df.index) # str_index_check=ptypes.is_string_dtype(out_df.index) ## Get common column between preprocess and output df try: if (anomaly_algorithm.lower() == 'autoencoder'): common_cols=out_df.drop(['loss','max_threshold','min_threshold','anomaly_value'],axis=1) common_cols.drop(common_cols.filter(regex="Unname"),axis=1, inplace=True) merge_on_cols=common_cols.columns.tolist() combined_df = preprocessed_df.merge(out_df, on=merge_on_cols,how='inner') ## Drop duplicate based on columns except time # drop_duplicate_on=merge_on_cols.extend(cat_diff_cols) drop_duplicate_on=merge_on_cols+cat_diff_cols combined_df = combined_df.drop_duplicates(drop_duplicate_on, keep=False) else: ## otherwise, it is dbscan algorithm common_cols=out_df.drop(['cluster','anomaly_value'],axis=1) common_cols.drop(common_cols.filter(regex="Unname"),axis=1, inplace=True) merge_on_cols=common_cols.columns.tolist() combined_df = preprocessed_df.merge(out_df, on=merge_on_cols,how='inner') ## Drop duplicate based on columns except time # drop_duplicate_on=merge_on_cols+cat_diff_cols combined_df = combined_df.drop_duplicates(merge_on_cols, keep='last') except: combined_df=out_df pass ## Just for reference, in future if you want different merge/join option # combined_df = pd.merge(preprocessed_df[diff_cols],out_df, left_index=True, right_index=True, how='inner') except Exception as e: self.log.info("<---- merge error msg : ---->"+str(e)) self.log.info("<---- merge error msg (detailed): ---->"+str(traceback.format_exc())) pass ## if both data frame have different columns (preprocessed and outdf) else: self.log.info("User data is preprocessed and data cleaning happened.So, actual data and processed data length mismatch. 
So, data records range may vary.")
                    try:
                        # combined_df=self.decoder_labeled_features(out_df)
                        combined_df = preprocessed_df.merge(out_df, on=num_cols, how='inner')
                        # combined_df = combined_df.drop_duplicates(cat_cols, keep='last')
                        combined_df = combined_df.drop_duplicates(num_cols, keep='last')
                    except:
                        ## If the merge does not work, fall back to making out_df the final dataframe.
                        try:
                            ## If the above merge fails, change the drop_duplicates hyperparam: keep='last' keeps the last appearance of each key occurrence.
                            combined_df = preprocessed_df.merge(out_df, on=num_cols, how='inner')
                            combined_df = combined_df.drop_duplicates(cat_cols, keep=False)
                        except:
                            # If nothing works, just keep out_df as the combined df.
                            combined_df = out_df
            ## if no common categorical col found between preprocessed df and out_df
            else:
                ## If the merge does not work, make out_df the final dataframe.
                if (len(cat_cols) > 0):
                    try:
                        combined_df = preprocessed_df.merge(out_df, on=num_cols, how='inner')
                        combined_df = combined_df.drop_duplicates(cat_cols, keep='last')
                    except:
                        # make safe for return
                        combined_df = out_df
                else:
                    ## If no categorical features are available
                    combined_df = preprocessed_df.merge(out_df, on=num_cols, how='inner')
                    combined_df = combined_df.drop_duplicates(num_cols, keep=False)
        except Exception as e:
            self.log.info("<---- Anomaly detection target data folder not available, dataframe merging issue. Error Msg: ---->"+str(e))
            self.log.info("Making output df the final merged data; no categorical column found in the output anomaly data. It is the user's responsibility to check the anomaly data.")
            # make safe for return
            combined_df = out_df
        return combined_df

    ## For module reusability, the naming function below was created.
    def naming_anomalyvalues(self, df):
        replace_values_T = '|'.join(['TRUE', 'True', 'true'])
        replace_values_F = '|'.join(['FALSE', 'False', 'false'])
        try:
            df['anomaly_value'] = df['anomaly_value'].astype(str).replace(replace_values_T, 'AnomalyDataPoint', regex=True)
        except:
            df['anomaly_value'] = df['anomaly_value'].replace(replace_values_T, 'AnomalyDataPoint', regex=True)
        df['anomaly_value'] = df['anomaly_value'].astype(str).replace(replace_values_F, 'NormalDataPoint', regex=True)
        return df

    ## DBScan based anomaly detection
    def dbscan_ad(self, data, eps, min_samples, cols):
        try:
            tuner = 'randomsearch'
            Parameter_Trials = {'eps': eps, 'min_samples': min_samples}
            model = DBSCAN(algorithm='auto')
            hist = model.fit(data)
            pred = model.fit_predict(data)
            best_params = self.hyperparamtuning_dbscan(model, tuner, Parameter_Trials, data)
            self.log.info("<---- Best hyper parameters for dbscan: ---->"+str(best_params))
            best_eps = best_params['eps']
            best_min_samples = best_params['min_samples']
            ## Guard rails on the tuned values: adjust best_min_samples itself (previously the
            ## unused local min_samples was assigned here, so the adjustment never took effect).
            if (best_min_samples < len(cols)):
                best_min_samples = len(cols) + 1
            if (best_eps < 0.2):
                best_eps = 0.2
            self.log.info("best_eps: \n"+str(best_eps))
            self.log.info("best_min_samples: \n"+str(best_min_samples))
            best_model = DBSCAN(algorithm='auto', eps=best_eps, min_samples=best_min_samples)
            hist = best_model.fit(data)
            pred = best_model.fit_predict(data)
            best_labels = best_model.labels_
            cluster_name = ["Cluster"+str(i) for i in set(best_labels)]
            # outliers = data[best_model.labels_ == -1]
            outlier_df = data.copy()
            outlier_df.loc[:, 'cluster'] = best_model.labels_
            outliers_final = outlier_df[outlier_df['cluster'] == -1]
            outliers_final['anomaly_value'] = outliers_final['cluster'] == -1
            normaldata = outlier_df[outlier_df['cluster'] != -1]
            self.log.info("<---- DBScan: Anomalies in data: ---->"+str(outliers_final))
            self.log.info("<---- DBScan: Number of anomalies in data: ---->"+str(len(outliers_final)))
            # num_cat_features=len(self.cat_cols)
            try:
self.save_anomalyvalues(outliers_final,'dbscan_anomaly_dataframe') self.save_anomalyvalues(normaldata,'dbscan_normaldata_dataframe') outlier_df['anomaly_value']=outlier_df['cluster']==-1 outlier_df=self.naming_anomalyvalues(outlier_df) ##Convert results to original input data form for end user ease of understanding try: outlier_df=self.merge_pre_post_dfs(outlier_df) except Exception as e: self.log.info("Anomaly Detection Merge df exception:\n"+str(e)) #If merge fails, just out! pass self.save_anomalyvalues(outlier_df,'dbscan_overall_dataframe') except Exception as e: self.log.info("DBScan inversetransform err. msg: \n"+str(e)) no_clusters = len(set(best_labels)) - (1 if -1 in best_labels else 0) self.log.info("<---- DBScan: No of clusters: ---->"+str(no_clusters)) n_noise_ = list(best_labels).count(-1) ## Ploting the dbscan clusters plot_name='dbscan_anomalyplot.png' fig, ax = plt.subplots() ax.set_title("DBScan Clusters") ax.scatter(data.iloc[:, 0], data.iloc[:, 1], c=best_labels) outliers_plot = data[best_model.labels_ == -1] ax.scatter(outliers_plot.iloc[:, 0], outliers_plot.iloc[:, 1], c='red') cwd=self.deployLocation try: plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot')) if not os.path.isdir(plot_dir): os.makedirs(plot_dir) plotpath=str(plot_dir)+'/'+plot_name except Exception as e: self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e)) if os.path.exists(plotpath): os.remove(plotpath) plt.savefig(plotpath) plt.clf() plt.cla() plt.close() except Exception as e: self.log.info("<---- dbscan error msg: ---->"+str(e)) self.log.info("<---- dbscan error msg (detailed): ---->"+str(traceback.format_exc())) return best_model,outliers_final ## Inverse transform fn for categorical data def inverse_transform(self,df,cat_cols,le_model): df_new=pd.DataFrame() df_new.index=df.index df_reset_index=df.reset_index(drop=True) for col in cat_cols: df_reset_index[col] = le_model.inverse_transform(df_reset_index[col].astype(int)) df_reset_index.index=df_new.index df=df_reset_index return df ##If data comes without going via aion data profiler, we can use this below preprcessing fn () ##Preprocess fn for categorical data , not used now. def preprocessfn_categorical(self,df): try: cat_cols=self.cat_cols preprocessed_df=None le=preprocessing.LabelEncoder() self.le_model=le label_encoded_df = df.copy() for col in cat_cols: label_encoded_df[col]=le.fit_transform(label_encoded_df[col]) except Exception as e: self.log.info("preprocessfn_categorical error traceback."+str(traceback.format_exc())) return label_encoded_df,cat_cols ## Design pattern: Factory,Adapter. Detect antoencoder object or dbscan object based on input params. The interface can be used for anyother extention. Not created any abstract class. ##Main autoencoder based anomaly detection function, from here, sub modules will be called. 
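## Reference only: the DBSCAN convention used in dbscan_ad() above is that points labelled -1
## belong to no cluster and are treated as outliers. A minimal, self-contained restatement
## (illustrative names, not the AION API):
def _dbscan_outliers_reference(data, eps=0.2, min_samples=5):
    from sklearn.cluster import DBSCAN
    model = DBSCAN(eps=eps, min_samples=min_samples).fit(data)
    # DBSCAN assigns the label -1 to noise points that fall in no cluster
    return data[model.labels_ == -1]
## The main entry point follows; it dispatches to the autoencoder or DBSCAN sub modules.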
def mainAnomalyDetectionfn(self): df=self.df ## reading post processed data from target->usecase->data directory # df=self.read_inputdata() ## Below line overwrite incoming df with postprocesseddata self.log.info("<----------- In autoencoder based anomaly detection algorithm main process module, the incoming dataframe information as below: \n") buf_info=self.get_df_info(df) self.log.info(buf_info) model_location=None time_series_data=None # mv_unique_feature_ad='' ae_hyperparameter=self.paramSpace # self.log.info("mainAnomalyDetectionfn df: \n"+str(df)) self.log.info("paramSpace in mainano: \n"+str(self.paramSpace)) target=self.target anomaly_algorithm=str(self.anomalyMethod) mv_unique_feature_ad=self.mv_featurebased_ad_status # df=self.dataload(datapath) df.drop(df.filter(regex="Unname"),axis=1, inplace=True) df.drop(df.filter(regex="truncated"),axis=1, inplace=True) # cols2remove=df.columns[df.columns.str.startswith('Unname')] # df.drop(cols2remove,axis=1,inplace=True) # df.to_csv("C:\Users\jayaramakrishnans\AppData\Local\Programs\HCLTech\AION\data\target\actual_df.csv") datetime_column=self.datetimeFeature len_dtf=len(self.datetimeFeature) ##create datetime pattern source py file in target dir. self.create_datetime_pyfile() # cat_cols=None if (self.datetimeFeature.lower() == 'na' or self.datetimeFeature==''): len_dtf=0 if (len_dtf >= 1 ): time_series_data="True" else: time_series_data="False" self.datetimeFeature='' try: if (target != ''): if (anomaly_algorithm.lower() == 'autoencoder'): self.log.info("Supervised anomaly detection started.\n") """ Below part for supervised time series anomaly detection.If timeseries anomaly (supervised) used in future, please uncomemnt and use below code snippet. """ # if (ae_hyperparameter['time_series_data'].lower() == 'true'): # print("Given data is time series data and supervised learning category, because it target is labeled one.") # datetime_column=str(self.datetimeFeature) # time_steps=int(ae_hyperparameter['time_steps']) # test_size_perc=int(ae_hyperparameter['test_size_perc']) # df[datetime_column] = pd.to_datetime(df[datetime_column]) # df.set_index(datetime_column, inplace=True) # self.aionAEAnomalyTS(df,test_size_perc,target,time_steps) ## Removing datetime feature for supervised autoencoder (currently timeseries anomaly not supported in supervised anomaly detection autoencoder) test_size_perc=self.testSize*100 df=df.dropna() model,anomaly_prediction_df,combined_df = self.ae_nontimeseriesmodelfn(df,target) # print("*** End of Autoencoder based non time series Anomaly detection. *** \n") self.log.info("*** End of Autoencoder based non time series Anomaly detection. ***") features=df.columns if (len(features)== 1): # print("Problem type is Univariate time series anomaly detection.\n") self.log.info("Problem type is Univariate time series anomaly detection.\n") test_size_perc=self.testSize*100 df=df.dropna() model,anomaly_prediction_df,combined_df = self.ae_nontimeseriesmodelfn(df,target) elif (len(features) > 1): df.drop(df.filter(regex="Unname"),axis=1, inplace=True) test_size_perc=self.testSize*100 df=df.dropna() model,anomaly_prediction_df,combined_df = self.ae_nontimeseriesmodelfn(df,target) if (mv_unique_feature_ad.lower()=='true'): self.log.info("\n\n *** Below is the anomaly values based on each feature of multivariate time series data. 
***") df.drop(df.filter(regex="Unname"),axis=1, inplace=True) multivariate_cols= df.columns.values.tolist() for col in multivariate_cols: col=str(col) ## creating dataframe for one of feature in multivariate dataset. multivariate_col_df = df[col].to_frame() model,anomaly_prediction_df,combined_df = self.ae_nontimeseriesmodelfn(multivariate_col_df,target) else: if (anomaly_algorithm.lower() == 'dbscan'): self.log.info("DBScan algorithm not supported for supervised training. \n") else: try: self.log.info("Unsupervised anomaly detection started....\n") if (anomaly_algorithm.lower() == 'autoencoder'): test_size_perc=self.testSize*100 if (time_series_data.lower() == 'true'): mv_unique_feature_ad=self.mv_featurebased_ad_status dropout=float(ae_hyperparameter['dropout']) datetime_column=str(self.datetimeFeature) time_steps=int(ae_hyperparameter['time_steps']) target=None features=df.columns if (len(features)== 1): self.log.info("Problem type is Univariate time series anomaly detection.\n") model,anomaly_prediction_df,combined_df = self.aionAEAnomalyTS(df,test_size_perc,target,time_steps,dropout,'False') elif (len(features) > 1): df.drop(df.filter(regex="Unname"),axis=1, inplace=True) self.log.info("Problem type is Multivariate time series anomaly detection.\n") self.log.info("*** Detecting anomaly in the given multi feature (Multivariate) data. Anomaly values based on all the features passed to the aion anomaly AE algorithm. ***") model,anomaly_prediction_df,combined_df = self.aionAEAnomalyTS(df,test_size_perc,target,time_steps,dropout,'False') if (mv_unique_feature_ad.lower()=='true'): self.log.info("\n\n *** Below is the anomaly values based on each feature of multivariate time series data. ***") df.drop(df.filter(regex="Unname"),axis=1, inplace=True) multivariate_cols= df.columns.values.tolist() # for i in range(1,len(features)): for col in multivariate_cols: col=str(col) ## creating dataframe for one of feature in multivariate dataset. multivariate_col = df[col].to_frame() feature_based_model,anomaly_prediction_df_mv,combined_df = self.aionAEAnomalyTS(multivariate_col,test_size_perc,target,time_steps,dropout,mv_unique_feature_ad) """ Below code snippet is commented, the purpose is AION giving 3 option to user for time series, < Three types: univariate_tsd(single_timeseriesdetection), multivariate_tsd, multivariate_seperate_tsd>, based on that specific sub function called. """ # if (ae_hyperparameter['timeseries_ad_type'].lower() =='univariate_tsad'): ## univariate_tsd # self.aionAEAnomalyTS(df,test_size_perc,target,time_steps,dropout) # elif (ae_hyperparameter['timeseries_ad_type'].lower() =='multivariate_tsad'): ##multivariate_tsd # if (len(features) <=1): # # self.aionAEAnomalyTS(df,test_size_perc,target,time_steps,dropout) # print("Given data looks like univariate data. Cannot apply multivariate. Check data and select appropriate timeseries anomaly detection option.") # else: # self.aionAEAnomalyTS(df,test_size_perc,target,time_steps,dropout) # elif (ae_hyperparameter['timeseries_ad_type'].lower() =='multivariate_featurebased_tsad'): ## multivariate_seperate_tsd # # features=df.columns # df.drop(df.filter(regex="Unname"),axis=1, inplace=True) # multivariate_cols= df.columns.values.tolist() # print("multivariate_cols: \n",multivariate_cols) # print("multivariate_cols type: \n",type(multivariate_cols)) # if (len(features) <=1): # # self.aionAEAnomalyTS(df,test_size_perc,target,time_steps,dropout) # print("Given data looks like univariate data. 
Cannot use multivariate.") # else: # # for i in range(1,len(features)): # for col in multivariate_cols: # print("processing multivariate feature name: ",col) # col=str(col) # multivariate_col = df[col].to_frame() # print("multivariate_col type: \n",type(multivariate_col)) # self.aionAEAnomalyTS(multivariate_col,test_size_perc,target,time_steps,dropout) # print("*** End of Autoencoder based time series Anomaly detection.*** \n") self.log.info("*** End of Autoencoder based non time series Anomaly detection. ***") else: target='' df=df.dropna() model,anomaly_prediction_df,combined_df = self.ae_nontimeseriesmodelfn(df,target) elif (anomaly_algorithm.lower() == 'dbscan'): # df=df.dropna() self.log.info("*** DBScan algorithm enabled. ***") cols=df.columns dbscan_hyperparameter=self.paramSpace eps = list(dbscan_hyperparameter['eps']) # eps=list(dbscan_hyperparameter['eps']) # min_samples=list(dbscan_hyperparameter['min_samples']) min_samples = list(dbscan_hyperparameter['min_samples']) model,outliers = self.dbscan_ad(df,eps,min_samples,cols) except Exception as e: self.log.info("Unsupervised anomaly detection error msg: "+str(traceback.format_exc())) ##main ae AeDetector except Exception as e: self.log.info("<---- Main fn error msg of anomaly detection for debug purpose: ---->"+str(e)) self.log.info("<---- Main fn error msg of anomaly detection for debug purpose: (detailed): ---->"+str(traceback.format_exc())) return model ## For testing standalone code if __name__ == '__main__': # print ("main function....") target=None df=None hparams=None AEAD=anomalyDetectionAE() datapath,target,ae_hyperparameter = AEAD.mainAnomalyDetectionfn()
anomalyDetector.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import time import os import sys import logging from sklearn.metrics import accuracy_score, make_scorer from sklearn.model_selection import train_test_split from sklearn.svm import OneClassSVM from sklearn.ensemble import IsolationForest import pickle from sklearn import metrics import numpy as np import pandas as pd from learner.aion_matrix import aion_matrix from learner.parameters import parametersDefine from sklearn.metrics import f1_score from sklearn import model_selection from learner.anomalyDetectionAE import anomalyDetectionAE class anomalyDetector(object): def __init__(self): self.log = logging.getLogger('eion') def startanomalydetector(self,df,target,labelMaps,inlierLabels,learnerJson,model_type,saved_model,anomalyMethod,deployLocation,predicted_data_file,testPercentage,anomalyconfig,datetimeFeature,mv_featurebased_ad_status): try: self.log.info("startanomalydetector.... \n") from io import StringIO buf = StringIO() df.info(buf=buf) #self.log.info(buf.getvalue()) self.log.info("User data info : \n"+str(buf.getvalue())) try: df[datetimeFeature] = pd.to_datetime(df[datetimeFeature]) df.set_index(datetimeFeature, inplace=True) #If still the datetime column exist in feature list, drop it. Because we already made datetime feature as index (datetimeindex) df.drop(datetimeFeature,axis=1,inplace=True) except Exception as e: pass ae_df=df paramObj=parametersDefine() anomalyMethod=anomalyMethod inlierLabels=inlierLabels anomalyDetectionType="" inlierLabelList=inlierLabels.split(",") self.log.info("<---- inlierLabels ---->"+inlierLabels) self.log.info("<---- anomalyMethod ---->"+str(anomalyMethod)) if target != "": self.log.info('Status:- |... AnomalyDetection: Supervised') self.log.info("One class based anomaly Detection by relabeling data to fit one class models") combinedString="" dfStr="" anomalyDetectionType="supervised" if not anomalyMethod.lower() == "autoencoder": ##Added for auto encoder self.log.info("startanomalydetector: df: \n"+str(df)) #task 12627 if labelMaps == {}: for inlierVal in inlierLabelList: inlier=inlierVal dfStr = "x ==" + inlier + " or " combinedString+= dfStr func= combinedString.strip(" or ") else: for inlierVal in inlierLabelList: try: if inlierVal.isnumeric(): inlierVal = int(inlierVal) # inlier=str(labelMaps[inlierVal]) ##Wrongly assigned inlier values to labelMaps dict key. 
inlier = str(inlierVal) dfStr = "x ==" + str(inlier) + " or " combinedString+= dfStr except Exception as e: raise Exception(e) func= combinedString.strip(" or ") labelMaps={'InlierLabel':1,'NonInlierLabel':-1} targetData=df[target] df['anomaly'] = df[target].apply(lambda x: 1 if eval(func) else -1 ) anomtargetData=df['anomaly'] self.log.info("dataframe after relabeling the data") self.log.info(df.head()) self.log.info("target column value counts with inliers and outliers") self.log.info(df['anomaly'].value_counts()) df.drop([target, "anomaly"], axis=1, inplace=True) outliers = anomtargetData[anomtargetData == -1] self.log.info("outliers in data") self.log.info(outliers.shape[0]) self.log.info("outlier fraction") self.log.info(outliers.shape[0]/targetData.shape[0]) if int(testPercentage) != 0: testSize= testPercentage/100 xtrain, xtest, ytrain, ytest = train_test_split(df, anomtargetData, test_size = testSize) else: xtrain =df xtest =df ytrain=anomtargetData ytest =anomtargetData if anomalyMethod.lower() == "isolationforest": modelName="isolationforest" paramSpace=anomalyconfig['modelParams']['IsolationForest'] paramDict =paramObj.paramDefine(paramSpace,'random') ftwo_scorer = make_scorer(accuracy_score) isolation_forest = model_selection.RandomizedSearchCV(IsolationForest(), paramDict, scoring=ftwo_scorer, n_iter=10) mod = isolation_forest.fit(xtrain,ytrain) model = mod.best_estimator_ elif anomalyMethod.lower() == "oneclasssvm": modelName="oneClassSVM" fthree_scorer = make_scorer(accuracy_score) paramSpace=anomalyconfig['modelParams']['oneclassSVM'] paramDict =paramObj.paramDefine(paramSpace,'random') one_class = model_selection.RandomizedSearchCV(OneClassSVM(), paramDict, scoring=fthree_scorer, n_iter=10) mod = one_class.fit(xtrain,ytrain) model = mod.best_estimator_ elif anomalyMethod.lower() == "autoencoder": modelName='autoencoder' testSize=testPercentage/100 self.log.info("Aion Autoencoder anomaly detection started..") paramSpace=anomalyconfig['modelParams']['AutoEncoder'] adae_obj=anomalyDetectionAE(ae_df,paramSpace,deployLocation,target,anomalyMethod,testSize,datetimeFeature,mv_featurebased_ad_status) model=adae_obj.mainAnomalyDetectionfn() self.log.info("Aion Autoencoder anomaly detection completed..") else: self.log.info("IsolationForest, OneClassSVM and autoencoder are supported models") modelName = "" model = "" else: self.log.info('Status:- |... 
AnomalyDetection: Unsupervised')
self.log.info("unsupervised anomaly detection")
anomalyDetectionType = "unsupervised"
model = None
xtrain = df
xtest = df
ytrain = pd.DataFrame()
if anomalyMethod.lower() == "isolationforest":
    paramSpace = anomalyconfig['modelParams']['IsolationForest']
    paramDict = paramObj.paramDefine(paramSpace, 'random')
    modelName = "isolationforest"
    def scorer_f(estimator, X):
        return np.mean(estimator.score_samples(X))
    isolation_forest = model_selection.RandomizedSearchCV(IsolationForest(), paramDict, scoring=scorer_f, n_iter=10)
    mod = isolation_forest.fit(xtrain)
    self.log.info('---------> Best Param: ' + str(mod.best_params_))
    model = mod.best_estimator_
elif anomalyMethod.lower() == "oneclasssvm":
    paramSpace = anomalyconfig['modelParams']['oneclassSVM']
    paramDict = paramObj.paramDefine(paramSpace, 'random')
    modelName = "oneClassSVM"
    def scorer_f1(estimator, X):
        return np.mean(estimator.score_samples(X))
    one_class = model_selection.RandomizedSearchCV(OneClassSVM(), paramDict, scoring=scorer_f1, n_iter=10)
    mod = one_class.fit(xtrain)
    self.log.info('---------> Best Param: ' + str(mod.best_params_))
    model = mod.best_estimator_
elif anomalyMethod.lower() == "autoencoder":
    ae_df.drop(ae_df.filter(regex="Unname"), axis=1, inplace=True)
    modelName = 'autoencoder'
    testSize = testPercentage / 100
    self.log.info("Aion Autoencoder anomaly detection started..")
    paramSpace = anomalyconfig['modelParams']['AutoEncoder']
    adae_obj = anomalyDetectionAE(ae_df, paramSpace, deployLocation, target, anomalyMethod, testSize, datetimeFeature, mv_featurebased_ad_status)
    model = adae_obj.mainAnomalyDetectionfn()
    self.log.info("Aion Autoencoder anomaly detection completed..")
elif anomalyMethod.lower() == "dbscan":
    testSize = testPercentage / 100
    ae_df.drop(ae_df.filter(regex="Unname"), axis=1, inplace=True)
    modelName = 'dbscan'
    self.log.info("Aion DBScan anomaly detection started..")
    paramSpace = anomalyconfig['modelParams']['DBScan']
    adae_obj = anomalyDetectionAE(ae_df, paramSpace, deployLocation, target, anomalyMethod, testSize, datetimeFeature, mv_featurebased_ad_status)
    model = adae_obj.mainAnomalyDetectionfn()
    self.log.info("Aion DBScan anomaly detection completed..")
else:
    self.log.info("IsolationForest, OneClassSVM, autoencoder and DBScan are the supported models")
    modelName = ""
    model = ""
self.log.info('Status:- |... AnomalyDetection Algorithm applied: ' + modelName)
if (anomalyMethod.lower() == "autoencoder" or anomalyMethod.lower() == "dbscan"):
    if (anomalyMethod.lower() == "autoencoder"):
        ## The autoencoder is built with tf.keras, so the model is saved in TensorFlow's SavedModel format; pickling it to a .sav file raises a 'TensorSliceReader constructor' error.
saved_model=saved_model.replace('.sav','') filename = os.path.join(deployLocation,'model',saved_model) model.save(filename,save_format="tf") elif (anomalyMethod.lower() == "dbscan"): filename = os.path.join(deployLocation,'model',saved_model) pickle.dump(model, open(filename, 'wb')) matrix='' trainmatrix='' accuracy = 0 else: filename = os.path.join(deployLocation,'model',saved_model) pickle.dump(model, open(filename, 'wb')) loaded_model=pickle.load(open(filename, 'rb')) pred_testData=loaded_model.predict(xtest) pred_trainData = loaded_model.predict(xtrain) pred_trainScore = loaded_model.decision_function(xtrain) self.log.info("<--- predicted values of testdata --->") self.log.info(pred_testData) if anomalyDetectionType == "supervised" : df_predicted = pd.DataFrame() df_predicted['actual'] = ytest df_predicted['predict'] = pred_testData df_predicted.to_csv(predicted_data_file) preds = pred_testData targs = ytest unique_elements_ytest, counts_elements_ytest = np.unique(targs, return_counts=True) unique_elements_pred, counts_elements_pred = np.unique(preds, return_counts=True) ''' for i in range(0,len(unique_elements_ytest)): self.log.info("unique value :" +str(unique_elements_ytest[i]) + " count in input testdata: " + str(counts_elements_ytest[i]) +" count in predicted testdata: " + str(counts_elements_pred[i])) self.log.info("\n") ''' self.log.info("\n======= Performance matrix on Test Data ======") aion_matrixobj = aion_matrix() self.log.info("-------> Test Matrix: ") matrix = aion_matrixobj.getClassificationPerformaceMatrix(targs,preds,labelMaps) self.log.info("-------> Train Matrix: ") trainmatrix = aion_matrixobj.getClassificationPerformaceMatrix(ytrain,pred_trainData,labelMaps) #self.log.info("-------> Confusion Matrix: ") self.log.info(metrics.confusion_matrix(targs,preds)) self.log.info("-------> accuracy for inliers: ") accuracy = metrics.accuracy_score(targs, preds) self.log.info(metrics.accuracy_score(targs, preds)) self.log.info("-------> precision for inliers --->") self.log.info(metrics.precision_score(targs, preds)) self.log.info("-------> recall for inliers ---> ") self.log.info(metrics.recall_score(targs, preds)) self.log.info("-------> f1 for inliers--->") self.log.info(metrics.f1_score(targs, preds)) self.log.info("-------> area under curve (auc) for inliers --->") self.log.info(metrics.roc_auc_score(targs, preds)) self.log.info("-------> precision for outliers --->") self.log.info(1-metrics.precision_score(targs, preds)) self.log.info("-------> recall for outliers ---> ") self.log.info(1-metrics.recall_score(targs, preds)) self.log.info("-------> f1 for outliers--->") self.log.info(1-metrics.f1_score(targs, preds)) self.log.info("======= Performance matrix on Test Data End ======\n") else: df_predicted = xtrain df_predicted['predict'] = pred_trainData df_predicted['score'] = pred_trainScore df_predicted.to_csv(predicted_data_file, index=False) matrix = '' trainmatrix = '' accuracy = 'NA' return modelName,model,matrix,trainmatrix,accuracy,labelMaps except Exception as inst: self.log.info("Error: anomalyDetector failed "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
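startanomalydetector above relabels the supervised target by building a string such as "x == 0 or x == 1" and calling eval per row. The same relabeling can be written without eval; a minimal sketch, assuming comma-separated inlier labels as in the code above:

import pandas as pd

def relabel_for_one_class(df: pd.DataFrame, target: str, inlier_labels: str) -> pd.Series:
    # Rows whose target value is an inlier label become 1, everything else -1,
    # matching labelMaps = {'InlierLabel': 1, 'NonInlierLabel': -1} used above.
    labels = [int(v) if v.isnumeric() else v for v in inlier_labels.split(',')]
    return df[target].isin(labels).map({True: 1, False: -1})

# e.g. df['anomaly'] = relabel_for_one_class(df, 'class', '0,1')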
regressionModel.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from learner.optimizetechnique import OptimizationTq import warnings from learner.parameters import parametersDefine from learner.defaultAlgos import defaultParams from hyperopt import fmin, tpe, hp, STATUS_OK, Trials import time import logging import os import sys import json from sklearn.svm import SVR from sklearn.linear_model import LinearRegression from sklearn.linear_model import Lasso from sklearn.linear_model import Ridge from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from xgboost import XGBRegressor from lightgbm import LGBMRegressor from catboost import CatBoostRegressor from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error,make_scorer from sklearn.metrics import mean_squared_error from learner.aion_matrix import aion_matrix from uncertainties.aionUQ import aionUQ import mlflow class RegressionModel(): def __init__(self,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,deployLocation): self.modelList =modelList self.params =params self.trainX =trainX self.trainY =trainY self.testX = testX self.testY = testY self.method =method self.scoreParam=scoreParam self.cvSplit=cvSplit self.numIter=numIter self.geneticParam=geneticParam self.log = logging.getLogger('eion') self.deployLocation = deployLocation self.uq_x_train = trainX self.uq_x_test = testX self.uq_y_train = trainY self.uq_y_test = testY self.AlgorithmNames={'Linear Regression':'LinearRegression','Lasso':'Lasso','Ridge':'Ridge','Decision Tree':'DecisionTreeRegressor','Random Forest':'RandomForestRegressor','Extreme Gradient Boosting (XGBoost)':'XGBRegressor','Light Gradient Boosting (LightGBM)': 'LGBMRegressor', 'Categorical Boosting (CatBoost)': 'CatBoostRegressor','Bagging (Ensemble)':'BaggingRegressor','Stacking (Ensemble)':'StackingRegressor','Voting (Ensemble)':'VotingRegressor','Neural Architecture Search':'NAS'} self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()} def logMlflow(self, runName, params, metrices, estimator, algoName=None): with mlflow.start_run(run_name = runName): for k,v in params.items(): mlflow.log_param(k, v) for k,v in metrices.items(): mlflow.log_metric(k, v) if algoName == 'CatBoostRegressor': mlflow.catboost.log_model(estimator, "model") else: mlflow.sklearn.log_model(estimator, "model") model_uri = mlflow.get_artifact_uri("model") """ for some dataset evaluate takes more than 90 min, so commenting till some solution is not found evaluate_data = self.testX.copy() evaluate_data['target'] = self.testY.copy() mlflow.evaluate(model_uri, data=evaluate_data, targets='target', model_type="regressor") del evaluate_data """ def regressionModelling(self,modelOrFeatureBased, code_configure): paramObj=parametersDefine() bestModel='' bestParams={} import sys bestScore=-sys.float_info.max #bugfix 11656 scoredetails = '' self.log.info('\n---------- Regression Model has started ----------') try: self.log.info('Status:- |... 
Search Optimization Method applied: '+self.method) for modelName in self.modelList: objClf = aion_matrix() if modelName in ['Bagging (Ensemble)','Voting (Ensemble)','Stacking (Ensemble)','Neural Architecture Search']: if modelName == 'Bagging (Ensemble)': from ensemble.ensemble_bagging import ensemble_bagging ensemble_bagging_obj = ensemble_bagging(self.params[modelName],self.scoreParam,0,0) estimator,modelParams,score,model = ensemble_bagging_obj.ensemble_bagging__regressor(self.trainX,self.trainY,self.testX,self.testY) if modelName == 'Stacking (Ensemble)': from ensemble.ensemble_stacking import ensemble_stacking ensemble_stacking_obj = ensemble_stacking(self.params[modelName],self.scoreParam) estimator,modelParams,score,model = ensemble_stacking_obj.ensemble_stacking__regressor(self.trainX,self.trainY,self.testX,self.testY,self.modelList) if modelName == 'Voting (Ensemble)': from ensemble.ensemble_voting import ensemble_voting ensemble_voting_obj = ensemble_voting(self.params[modelName],self.scoreParam) estimator,modelParams,score,model = ensemble_voting_obj.ensemble_voting__regressor(self.trainX,self.trainY,self.testX,self.testY,self.modelList) ''' if modelName == 'Neural Architecture Search': from nas.aionNAS import aionNAS objNAS = aionNAS('Regression',self.params[modelName],self.trainX,self.testX,self.trainY,self.testY,self.deployLocation) estimator,modelParams,score,model=objNAS.nasMain(self.scoreParam) ''' if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":"NA"}' if self.scoreParam == "r2": if score > bestScore: bestScore =score bestModel =model bestParams=modelParams bestEstimator=estimator else: if abs(score) < bestScore or bestScore == -sys.float_info.max: bestScore =abs(score) bestModel =model bestParams=modelParams bestEstimator=estimator self.log.info('Status:- |... ML Algorithm applied: '+modelName) self.log.info('Status:- |... 
Score: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\n') continue if modelName not in self.params: continue paramSpace=self.params[modelName].copy() algoName = self.AlgorithmNames[modelName] paramDict =paramObj.paramDefine(paramSpace,self.method) if self.method == 'bayesopt': code_configure.add_model(algoName,paramSpace) else: paramDictCopy = paramDict # numpy array is not json serializable #numpy is already imported but still np.ndarray raise error import numpy as np for key,value in paramDictCopy.items(): if isinstance(value, np.ndarray): paramDictCopy[key] = paramDictCopy[key].tolist() code_configure.add_model(algoName,paramDictCopy) if not self.method == 'bayesopt': paramSize = paramObj.getParamSpaceSize(paramDict) else: paramSize = 0 if (self.method == 'bayesopt' and not paramDict) or (not self.method == 'bayesopt' and paramSize<=1): try: start = time.time() #function call defObj = defaultParams(algoName,paramDict,self.scoreParam,0,0,paramSize) estimator, modelParams, model,score =defObj.startTrainingRegression(self.trainX,self.trainY,self.testX,self.testY) executionTime = time.time() - start self.log.info('---------> Total Execution: ' + str(executionTime) + '\n') if (scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"' + self.modelToAlgoNames[model] + '","FeatureEngineering":"' + str( modelOrFeatureBased) + '","Score":' + str(score) + ',"ModelUncertainty":"NA"}' if self.scoreParam == "r2": if score > bestScore: bestScore = score bestModel = model bestParams = modelParams bestEstimator = estimator else: if abs(score) < bestScore or bestScore == -sys.float_info.max: bestScore = abs(score) bestModel = model bestParams = modelParams bestEstimator = estimator self.log.info('Status:- |... ML Algorithm applied: ' + modelName) self.log.info('Status:- |... Score: ' + objClf.get_print_score(self.scoreParam) + '=' + str( round(score, 2)) + '\n') except Exception as inst: self.log.info('\n < ---------- Model Execution Failed Start--------->') self.log.info('\n<-------' + modelName + ' Model Execution failed!!!.' 
+ str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) self.log.info('\n < ---------- Model Execution Failed End --------->') continue trainingStatus = 'Success' if self.method =='grid': try: self.log.info("-------> Optimization Method :Grid Search") self.log.info("-------> Model Name: "+str(modelName)) opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) start = time.time() model,modelParams,score,estimator=opTq.gridSearchOpt() executionTime=time.time() - start if not self.testX.empty: predictedData = estimator.predict(self.testX) if 'neg_mean_squared_error' in self.scoreParam: meanssquatederror = mean_squared_error(self.testY,predictedData) score = meanssquatederror elif 'neg_root_mean_squared_error' in self.scoreParam: rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False) score = rootmeanssquatederror elif 'mae' in self.scoreParam: meanabsoluteerror=mean_absolute_error(self.testY,predictedData) score = meanabsoluteerror elif 'r2' in self.scoreParam: r2score=r2_score(self.testY,predictedData) score = r2score problemName = estimator.__class__.__name__ runName = algoName + '_' + modelOrFeatureBased metrices = {} metrices["score"] = score try: self.logMlflow(runName, modelParams, metrices, estimator,algoName) except Exception as e: self.log.info('\n-----> ML flow error!!!.' + str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) # raise pass uq_jsonobject = '' try: if model in ['XGBRegressor','LGBMRegressor','CatBoostRegressor']: self.log.info('-----> Model Uncertainty Not Supported') else: problemName = estimator.__class__.__name__ uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq") self.log.info("-------> model_confidence: "+str(total_picp_percentage)+str('%')) self.log.info("-------> model_uncertainty: "+str(total_Uncertainty_percentage)+str('%')) except: pass if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(abs(score))+',"ModelUncertainty":'+str(json.dumps(uq_jsonobject))+'}' self.log.info('---------> Total Execution: '+str(executionTime)+'\n') if self.scoreParam == "r2": if score > bestScore: bestScore =score bestModel =model bestParams=modelParams bestEstimator=estimator else: if abs(score) < bestScore or bestScore == -sys.float_info.max: bestScore =abs(score) bestModel =model bestParams=modelParams bestEstimator=estimator except Exception as inst: self.log.info('\n < ---------- Model Execution Failed Start--------->') self.log.info('\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) self.log.info('\n < ---------- Model Execution Failed End --------->') trainingStatus = 'Error (Exception)' elif self.method == 'random': try: self.log.info("-------> Optimization Method :Random Search") self.log.info("-------> Model Name: "+str(modelName)) opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) start = time.time() 
model,modelParams,score,estimator=opTq.randomSearchOpt() executionTime=time.time() - start if not self.testX.empty: predictedData = estimator.predict(self.testX) if 'neg_mean_squared_error' in self.scoreParam: meanssquatederror = mean_squared_error(self.testY,predictedData) score = meanssquatederror elif 'neg_root_mean_squared_error' in self.scoreParam: rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False) score = rootmeanssquatederror elif 'mae' in self.scoreParam: meanabsoluteerror=mean_absolute_error(self.testY,predictedData) score = meanabsoluteerror elif 'r2' in self.scoreParam: r2score=r2_score(self.testY,predictedData) score = r2score if self.scoreParam == "r2": if score>bestScore: bestScore =score bestModel =model bestParams=modelParams bestEstimator=estimator else: if abs(score) < bestScore or bestScore == -sys.float_info.max: bestScore =abs(score) bestModel =model bestParams=modelParams bestEstimator=estimator problemName = estimator.__class__.__name__ runName = algoName + '_' + modelOrFeatureBased metrices = {} metrices["score"] = score try: self.logMlflow(runName, modelParams, metrices, estimator,algoName) except Exception as e: self.log.info('\n-----> ML flow error!!!.' + str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) # raise pass uq_jsonobject = '' try: if model in ['XGBRegressor','LGBMRegressor','CatBoostRegressor']: self.log.info('-----> Model Uncertainty Not Supported') else: problemName = estimator.__class__.__name__ uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq") self.log.info("-------> model_confidence: "+str(total_picp_percentage)+str('%')) self.log.info("-------> model_uncertainty: "+str(total_Uncertainty_percentage)+str('%')) except Exception as e: print(e) pass if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(abs(score))+',"ModelUncertainty":'+str(json.dumps(uq_jsonobject))+'}' except Exception as inst: self.log.info('\n < ---------- Model Execution Failed Start--------->') self.log.info('\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) self.log.info('\n < ---------- Model Execution Failed End --------->') trainingStatus = 'Error (Exception)' elif self.method == 'bayesopt': try: self.log.info("-------> Optimization Method :BayesOpt Search") self.log.info("-------> Model Name: "+str(modelName)) opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) fun=opTq.f trials = Trials() start = time.time() best = fmin(fun,paramDict,algo=tpe.suggest, max_evals=100, trials=trials) executionTime=time.time() - start results = sorted(trials.results, key = lambda x: x['loss']) bestresult=results[0] model=bestresult['model'] score=bestresult['score'] modelParams=bestresult['params'] res = ', '.join("{!s}={!r}".format(key,val) for (key,val) in modelParams.items()) modelObj=eval(model+'('+res+')') estimator = modelObj.fit(self.trainX,self.trainY) if not self.testX.empty: predictedData = estimator.predict(self.testX) if 'neg_mean_squared_error' in self.scoreParam: meanssquatederror = 
mean_squared_error(self.testY,predictedData) score = meanssquatederror elif 'neg_root_mean_squared_error' in self.scoreParam: rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False) score = rootmeanssquatederror elif 'mae' in self.scoreParam: meanabsoluteerror=mean_absolute_error(self.testY,predictedData) score = meanabsoluteerror elif 'r2' in self.scoreParam: r2score=r2_score(self.testY,predictedData) score = r2score problemName = estimator.__class__.__name__ runName = algoName + '_' + modelOrFeatureBased metrices = {} metrices["score"] = score try: self.logMlflow(runName, modelParams, metrices, estimator,algoName) except Exception as e: self.log.info('\n-----> ML flow error!!!.' + str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) # raise pass if self.scoreParam == "r2": if score>bestScore: bestScore =score bestModel =model bestParams=modelParams bestEstimator=estimator else: if abs(score) < bestScore or bestScore == -sys.float_info.max: bestScore =abs(score) bestModel =model bestParams=modelParams bestEstimator=estimator uq_jsonobject = '' try: if model in ['XGBRegressor','LGBMRegressor','CatBoostRegressor']: self.log.info('-----> Model Uncertainty Not Supported') else: problemName = estimator.__class__.__name__ uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq") self.log.info("-------> model_confidence: "+str(total_picp_percentage)+str('%')) self.log.info("-------> model_uncertainty: "+str(total_Uncertainty_percentage)+str('%')) except: pass if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(abs(score))+',"ModelUncertainty":'+str(json.dumps(uq_jsonobject))+'}' self.log.info('---------> Total Execution: '+str(executionTime)+'\n') except Exception as inst: self.log.info('\n < ---------- Model Execution Failed Start--------->') self.log.info('\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) self.log.info('\n < ---------- Model Execution Failed End --------->') trainingStatus = 'Error (Exception)' else: trainingStatus = 'Error (HyperTunning Algo Not Supported)' pass self.log.info('Status:- |... ML Algorithm applied: '+modelName) if trainingStatus.lower() == 'success': self.log.info('Status:- |... Score after hyperparameter tuning: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\n') else: self.log.info('Status:- |... Training Error : '+trainingStatus+'\n') if bestModel != 'None': self.log.info('---------- Regression Model End ---------- \n') self.log.info('\n------- Best Model and its parameters -------------') self.log.info('Status:- |... 
Best Algorithm selected: ' + str(self.modelToAlgoNames[bestModel]) + ' Score=' + str(round(bestScore, 2)))
                self.log.info("-------> Best Name: " + str(bestModel))
                self.log.info("-------> Best Score: " + str(bestScore))
            else:
                raise Exception("Sorry, no model is trained")
            return self.modelToAlgoNames[bestModel], bestParams, bestScore, bestEstimator, scoredetails
        except Exception as inst:
            self.log.info('\n-----> regressionModel failed!!!.' + str(inst))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
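regressionModel.py repeats one selection rule across its grid, random and bayesopt branches: r2 is maximized, while error metrics (MSE, RMSE, MAE) are compared on their absolute value, and bestScore is seeded with -sys.float_info.max so the first candidate is always accepted. A condensed sketch of that rule (the scores below are hypothetical):

import sys

def is_better(score: float, best_score: float, score_param: str) -> bool:
    # r2: higher is better; error metrics: lower absolute value is better.
    if score_param == 'r2':
        return score > best_score
    return abs(score) < best_score or best_score == -sys.float_info.max

best_score, best_model = -sys.float_info.max, None
for name, score in [('Lasso', -12.4), ('Ridge', -9.8)]:   # hypothetical results
    if is_better(score, best_score, 'neg_mean_squared_error'):
        best_score, best_model = abs(score), name
# best_model == 'Lasso' after the first iteration, then 'Ridge' wins on lower error.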
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
defaultAlgos.py
import numpy as np # from learner.classificationModel import ClassifierModel from learner.aion_matrix import aion_matrix from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error,make_scorer from sklearn.metrics import mean_squared_error class defaultParams(): def __init__(self, modelName, paramDict, scoreParam, MakeFP0, MakeFN0,paramSize): self.modelName = modelName self.paramDict = paramDict self.scoreParam = scoreParam self.MakeFP0 = MakeFP0 self.MakeFN0 = MakeFN0 self.dictsize = paramSize def paramDictConvertion(self): if self.dictsize != 0: for keys in self.paramDict.keys(): self.paramDict[keys] = self.paramDict[keys][0] def startTrainingClassification(self, trainX, trainY, testX, testY): threshold = -1 precisionscore = -1 recallscore = -1 objClf = aion_matrix() self.paramDictConvertion() if self.modelName == 'LogisticRegression': from sklearn import linear_model estimator = linear_model.LogisticRegression() if self.modelName == 'GaussianNB': from sklearn.naive_bayes import GaussianNB estimator = GaussianNB() if self.modelName == 'SVC': from sklearn import svm estimator = svm.SVC() if self.modelName == 'KNeighborsClassifier': from sklearn.neighbors import KNeighborsClassifier estimator = KNeighborsClassifier() if self.modelName == 'DecisionTreeClassifier': from sklearn.tree import DecisionTreeClassifier estimator = DecisionTreeClassifier() if self.modelName == 'RandomForestClassifier': from sklearn.ensemble import RandomForestClassifier estimator = RandomForestClassifier() if self.modelName == 'GradientBoostingClassifier': from sklearn.ensemble import GradientBoostingClassifier estimator = GradientBoostingClassifier() if self.modelName == 'XGBClassifier': import xgboost as xgb estimator = xgb.XGBClassifier() if self.modelName == 'CatBoostClassifier': from catboost import CatBoostClassifier estimator = CatBoostClassifier() if self.modelName == 'LGBMClassifier': from lightgbm import LGBMClassifier estimator = LGBMClassifier() if self.dictsize != 0: estimator.set_params(**self.paramDict) estimator.fit(trainX, trainY) if not testX.empty: predictedData = estimator.predict(testX) score = objClf.get_score(self.scoreParam, testY, predictedData) if self.MakeFP0: self.log.info('-------- Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange, endRange, stepsize) threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY, threshold_range, 'FP', self.modelName) self.log.info('-------- Calculate Threshold for FP End-------') if self.MakeFN0: self.log.info('-------- Calculate Threshold for FN Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange, endRange, stepsize) threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY, threshold_range, 'FN', self.modelName) self.log.info('-------- Calculate Threshold for FN End-------') else: predictedData = estimator.predict(trainX) score = objClf.get_score(self.scoreParam, trainY, predictedData) if self.MakeFP0: self.log.info('-------- Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange, endRange, stepsize) threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY, threshold_range, 'FP', self.modelName) self.log.info('-------- Calculate Threshold for FP End-------') if self.MakeFN0: self.log.info('-------- Calculate Threshold for FN 
Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange, endRange, stepsize) threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY, threshold_range, 'FN', self.modelName) self.log.info('-------- Calculate Threshold for FN End-------') # status, bscore, bthres, brscore, bpscore = objClf.getBestModel(self.MakeFP0,self.MakeFN0, threshold, # bestthreshold, recallscore, bestrecallscore, # precisionscore, bestprecisionscore, score, # bestScore) return estimator, estimator.get_params(), self.modelName, score, threshold, precisionscore, recallscore def startTrainingRegression(self, trainX, trainY, testX, testY): #objClf = aion_matrix() try: score = 0 self.paramDictConvertion() if self.modelName=="LinearRegression": from sklearn import linear_model estimator = linear_model.LinearRegression() if self.modelName=="Lasso": from sklearn import linear_model estimator = linear_model.Lasso() if self.modelName=="Ridge": from sklearn import linear_model estimator = linear_model.Ridge() if self.modelName=="DecisionTreeRegressor": from sklearn.tree import DecisionTreeRegressor estimator = DecisionTreeRegressor() if self.modelName=="RandomForestRegressor": from sklearn.ensemble import RandomForestRegressor estimator = RandomForestRegressor() if self.modelName== "XGBRegressor": import xgboost as xgb estimator = xgb.XGBRegressor() if self.modelName == 'CatBoostRegressor': from catboost import CatBoostRegressor estimator = CatBoostRegressor() if self.modelName == 'LGBMRegressor': from lightgbm import LGBMRegressor estimator = LGBMRegressor() if self.dictsize != 0: estimator.set_params(**self.paramDict) estimator.fit(trainX, trainY) except Exception as e: print(e) if not testX.empty: predictedData = estimator.predict(testX) if 'neg_mean_squared_error' in self.scoreParam: meanssquatederror = mean_squared_error(testY, predictedData) score = meanssquatederror elif 'neg_root_mean_squared_error' in self.scoreParam: rootmeanssquatederror = mean_squared_error(testY, predictedData, squared=False) score = rootmeanssquatederror elif 'mae' in self.scoreParam: meanabsoluteerror = mean_absolute_error(testY, predictedData) score = meanabsoluteerror elif 'r2' in self.scoreParam: r2score = r2_score(testY, predictedData) score = r2score else: predictedData = estimator.predict(trainX) if 'neg_mean_squared_error' in self.scoreParam: meanssquatederror = mean_squared_error(trainY, predictedData) score = meanssquatederror elif 'neg_root_mean_squared_error' in self.scoreParam: rootmeanssquatederror = mean_squared_error(trainY, predictedData, squared=False) score = rootmeanssquatederror elif 'mae' in self.scoreParam: meanabsoluteerror = mean_absolute_error(trainY, predictedData) score = meanabsoluteerror elif 'r2' in self.scoreParam: r2score = r2_score(trainY, predictedData) score = r2score return estimator, estimator.get_params(), self.modelName, score
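defaultAlgos.py is the fallback used when the tuning space collapses to a single point (paramSize <= 1): paramDictConvertion unwraps the one-element lists, set_params applies them, and the estimator is fit once with no search. A usage sketch with hypothetical parameter values, assuming the learner package is importable:

import pandas as pd
from sklearn.datasets import make_regression
from learner.defaultAlgos import defaultParams

# Synthetic data in the DataFrame/Series form the trainer passes around.
X, y = make_regression(n_samples=200, n_features=5, noise=0.1, random_state=0)
trainX, testX = pd.DataFrame(X[:150]), pd.DataFrame(X[150:])
trainY, testY = pd.Series(y[:150]), pd.Series(y[150:])

paramDict = {'n_estimators': [100], 'max_depth': [8]}   # a single point in the space
defObj = defaultParams('RandomForestRegressor', paramDict, 'r2', 0, 0, 1)
estimator, params, modelName, score = defObj.startTrainingRegression(trainX, trainY, testX, testY)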
objectDetector.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import sys import json import shutil import tarfile import logging import subprocess from os.path import expanduser import platform from pathlib import Path, PurePosixPath import tensorflow.compat.v2 as tf os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1) from google.protobuf import text_format ''' from object_detection import model_lib_v2 from object_detection import model_main_tf2 from object_detection import exporter_lib_v2 from object_detection.utils import config_util from object_detection.protos import pipeline_pb2 ''' from learner.cloudServer import awsGPUTraining class objectDetector(object): def __init__(self, dataLocation, pretrainedModels, modelDirName,remoteTrainingConfig): self.log = logging.getLogger('eion') self.dataLocation = dataLocation self.pretrainedModels = Path(pretrainedModels) self.modelDirName = Path(modelDirName['file']) self.modelURLDict = modelDirName self.gpu = remoteTrainingConfig['Enable'] self.serverConfig = remoteTrainingConfig self.modelOutput = Path(dataLocation).parent/"export" if remoteTrainingConfig['Enable']: ''' if not Path(serverConfigFile).is_file(): raise ValueError("Gpu training is enabled but server config file is not present.") with open(serverConfigFile) as fObj: self.serverConfig = json.load(fObj) ''' self.tfRecordLoc = PurePosixPath('aion/data/od') self.pipelineLoc = PurePosixPath('aion/data/od') self.labelMapLoc = PurePosixPath('aion/data/od') self.gpuPretrainedModelPath = PurePosixPath('aion/pretrainedModels')/self.modelDirName else: self.tfRecordLoc = Path(dataLocation) self.pipelineLoc = Path(dataLocation) self.labelMapLoc = Path(dataLocation) self.gpuPretrainedModelPath = None def prepareConfig(self, detectionModel, num_classes, n_epoch, batch_size): home = expanduser("~") if platform.system() == 'Windows': modelPath = os.path.join(home,'AppData','Local','HCLT','AION','PreTrainedModels','ObjectDetection') else: modelPath = os.path.join(home,'HCLT','AION','PreTrainedModels','ObjectDetection') pipeline_config = str(modelPath/self.modelDirName/"pipeline.config") checkPoint = "ckpt-0" with open(str(modelPath/self.modelDirName/"checkpoint/checkpoint")) as f: line = f.readline() checkPoint = line.split(':')[1].strip()[1:-1] #(model_checkpoint_path: "ckpt-301") to ckpt-301 checkPoint = "checkpoint/"+checkPoint from object_detection.utils import config_util configs = config_util.get_configs_from_pipeline_file(pipeline_config) model_config = configs['model'] if detectionModel.lower() == 'ssd': model_config.ssd.num_classes = num_classes configs['train_config'].fine_tune_checkpoint_type = "detection" elif detectionModel.lower() == 'centernet': model_config.center_net.num_classes = num_classes configs['train_config'].fine_tune_checkpoint_type = "fine_tune" elif detectionModel.lower() == 'fasterrcnn': model_config.faster_rcnn.num_classes = num_classes configs['train_config'].fine_tune_checkpoint_type = "detection" else: raise ValueError("{} Model is not supported for object 
detection.\n".format(detectionModel)) if self.gpu: checkpointPath = str(self.gpuPretrainedModelPath / checkPoint) else: checkpointPath = str(modelPath/self.modelDirName/checkPoint) configs['train_config'].fine_tune_checkpoint = checkpointPath configs['train_config'].num_steps = n_epoch configs['train_config'].batch_size = batch_size configs['train_input_config'].tf_record_input_reader.input_path[:] = [str(self.tfRecordLoc/"train.tfrecord")] configs['train_input_config'].label_map_path = str(self.labelMapLoc/"label_map.pbtxt") configs['eval_input_config'].tf_record_input_reader.input_path[:] = [self.dataLocation + "/test.tfrecord"] configs['eval_input_config'].label_map_path = self.dataLocation + "/label_map.pbtxt" # Save new pipeline config new_pipeline_proto = config_util.create_pipeline_proto_from_configs(configs) config_util.save_pipeline_config(new_pipeline_proto, self.dataLocation) def __exportModel(self): self.log.info('-------> exporting trained Model') from object_detection.protos import pipeline_pb2 from object_detection import exporter_lib_v2 pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() with tf.io.gfile.GFile(str(self.pipelineLoc/"pipeline.config"), 'r') as f: text_format.Merge(f.read(), pipeline_config) text_format.Merge('', pipeline_config) exporter_lib_v2.export_inference_graph( 'image_tensor', pipeline_config, self.dataLocation, str(self.modelOutput)) def startObjectDetector(self): if self.gpu: self.log.info('-------> Training on the cloud machine') self.log.info('Status:- |...Remote Machine Training') with open(self.dataLocation+'\model.config', 'w')as f: json.dump( self.modelURLDict, f) awsGpu = awsGPUTraining(self.serverConfig) try: awsGpu.start_instance() awsGpu.copy_files_to_server(self.dataLocation) awsGpu.start_executing_notebook() self.log.info('-------> Downloading trained model file') tarFile = awsGpu.copy_file_from_server(self.dataLocation) with tarfile.open(tarFile) as tar: tar.extractall(self.dataLocation) awsGpu.stop_server_instance() except: awsGpu.stop_server_instance() raise extractedPath = Path(self.dataLocation)/Path(tarFile).name.split('.')[0] filesList = extractedPath.glob('**/*') for file in filesList: if file.parent == extractedPath: if file.name == "export": shutil.copytree(file, self.modelOutput) elif file.is_dir(): shutil.copytree(file, Path(self.dataLocation)/file.name) else: shutil.copy2(file, self.dataLocation) shutil.rmtree(extractedPath) Path(tarFile).unlink() shutil.copy2(self.dataLocation + "/label_map.pbtxt", str(self.modelOutput)) else: self.log.info('-------> Training on the local machine') self.log.info('Status:- |...Local Machine Training') tf.config.set_soft_device_placement(True) strategy = tf.compat.v2.distribute.MirroredStrategy() with strategy.scope(): try: from object_detection import model_lib_v2 model_lib_v2.train_loop( pipeline_config_path=str(self.pipelineLoc/"pipeline.config"), model_dir=str(self.dataLocation)) except Exception: raise self.__exportModel() shutil.copy2(str(self.labelMapLoc/"label_map.pbtxt"), str(self.modelOutput)) def evaluateObjectDetector(self, model_dir, pipeline_config_dir=None, checkpoint_dir=None): if checkpoint_dir == None: checkpoint_dir = model_dir if pipeline_config_dir == None: pipeline_config_dir = model_dir self.log.info('-------> Evaluation started') from object_detection import model_main_tf2 cmd = '"{}" "{}" --model_dir="{}" --pipeline_config_path="{}/pipeline.config" --checkpoint_dir="{}" --eval_timeout=6'.format(sys.executable, model_main_tf2.__file__, model_dir, model_dir, 
checkpoint_dir)
        result = subprocess.run(cmd, capture_output=True, text=True, shell=True)
        precisionParam = ['Average Precision', 'Average Recall']
        text = result.stdout.split('\n')
        stats = {}
        keys = []
        try:
            for x in text:
                for y in precisionParam:
                    indx = x.find(y)
                    if indx != -1:
                        keyValue = x[indx:].split(' = ')
                        stats[keyValue[0]] = keyValue[1]
                        keys.append(keyValue[0])
        except Exception as e:
            raise ValueError("Error in evaluation: " + str(e))
        self.log.info('-------> Evaluation statistics:')
        self.log.info(stats)
        return stats, keys
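evaluateObjectDetector above shells out to model_main_tf2 and scrapes 'Average Precision' / 'Average Recall' lines from its stdout. The same parsing run standalone against illustrative COCO-metric lines (the sample output below is made up for the demo):

sample_stdout = """\
 Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.412
 Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.335
"""
stats, keys = {}, []
for line in sample_stdout.split('\n'):
    for metric in ('Average Precision', 'Average Recall'):
        indx = line.find(metric)
        if indx != -1:
            keyValue = line[indx:].split(' = ')
            stats[keyValue[0]] = keyValue[1]   # metric description -> value string
            keys.append(keyValue[0])
print(stats, keys)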
optimizetechnique.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import SGDClassifier, PassiveAggressiveClassifier from sklearn.linear_model import SGDRegressor, PassiveAggressiveRegressor from sklearn.linear_model import SGDClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import cross_val_score from sklearn.svm import SVC from hyperopt import fmin, tpe, hp, STATUS_OK, Trials from sklearn.svm import SVR import xgboost as xgb from xgboost import XGBClassifier from lightgbm import LGBMClassifier from catboost import CatBoostClassifier from xgboost import XGBRegressor from lightgbm import LGBMRegressor from catboost import CatBoostRegressor from sklearn.linear_model import LinearRegression from sklearn.linear_model import Lasso from sklearn.linear_model import Ridge from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor import warnings warnings.filterwarnings('ignore') import time import logging import sys,os class StreamToLogger(object): def __init__(self, logger, log_level=logging.INFO): self.logger = logger self.log_level = log_level self.linebuf = '' def write(self, buf): for line in buf.rstrip().splitlines(): self.logger.log(self.log_level, 'Model:- Iteration:: '+line.rstrip()) class OptimizationTq(): def __init__(self,modelName,tuneParams,cvSplit,scoreParam,nIter,trainX,trainY,geneticParam=None): self.data = None self.model=modelName self.params =tuneParams self.cvSplit=cvSplit self.scoreParam=scoreParam self.trainX =trainX self.trainY = trainY self.geneticParam=geneticParam if geneticParam else {} self.nIter =nIter self.count =0 self.best =0 self.log = logging.getLogger('eion') def gridSearchOpt(self): try: sl = StreamToLogger(self.log, logging.INFO) oldStdout = sys.stdout sys.stdout = sl self.log.info('Model:-Model Name:: '+str(self.model)) modelObj=eval(self.model+'()') gridOp = GridSearchCV(modelObj, param_grid=self.params,scoring=self.scoreParam, cv=self.cvSplit,verbose=10) gridFit=gridOp.fit(self.trainX,self.trainY) self.log.info('Model:-Model Name:: '+str(self.model)) self.log.info('Model:-ScoringType:: '+str(gridFit.scorer_)) self.log.info('Model:-Best Param:: '+str(gridFit.best_params_)) self.log.info('Model:-Validation Score:: '+str(gridFit.best_score_)) self.log.info('Model:-CV Result:: '+str(gridFit.cv_results_)) self.log.info('Model:-Best Estimator:: '+str(gridFit.best_estimator_)) sys.stdout = oldStdout return self.model,gridFit.best_params_,gridFit.best_score_,gridFit.best_estimator_ except Exception as inst: self.log.info("gridSearchOpt failed ==>"+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = 
os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))

    def randomSearchOpt(self):
        try:
            sl = StreamToLogger(self.log, logging.INFO)
            oldStdout = sys.stdout
            sys.stdout = sl
            self.log.info('Model:-Model Name:: ' + str(self.model))
            modelObj = eval(self.model + '()')
            randomOp = RandomizedSearchCV(modelObj, param_distributions=self.params, scoring=self.scoreParam, n_iter=self.nIter, cv=self.cvSplit, verbose=10)
            randomFit = randomOp.fit(self.trainX, self.trainY)
            self.log.info('Model:-Model Name:: ' + str(self.model))
            self.log.info('Model:-ScoringType:: ' + str(randomFit.scorer_))
            self.log.info('Model:-Best Param:: ' + str(randomFit.best_params_))
            self.log.info('Model:-Validation Score:: ' + str(randomFit.best_score_))
            self.log.info('Model:-CV Result:: ' + str(randomFit.cv_results_))
            self.log.info('Model:-Best Estimator:: ' + str(randomFit.best_estimator_))
            sys.stdout = oldStdout
            return self.model, randomFit.best_params_, randomFit.best_score_, randomFit.best_estimator_
        except Exception as inst:
            sys.stdout = oldStdout  # restore stdout on failure so logging is not left redirected
            self.log.info("RandomSearch optimization failed ==>" + str(inst))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))

    def bayesianOpt(self, params):
        modelObj = eval(self.model + '(**' + str(params) + ')')
        score = cross_val_score(modelObj, self.trainX, self.trainY, scoring=self.scoreParam, cv=self.cvSplit)
        return score.mean()

    def f(self, params):
        # Note: these are local copies of the instance attributes and are never
        # written back, so the best/count bookkeeping below has no lasting effect.
        best = self.best
        count = self.count
        parameters = params
        count += 1
        classObj = OptimizationTq(self.model, self.params, self.cvSplit, self.scoreParam, self.nIter, self.trainX, self.trainY)
        acc = classObj.bayesianOpt(parameters.copy())
        return {'loss': -acc, 'score': acc, 'status': STATUS_OK, 'model': self.model, 'params': params}
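OptimizationTq.f plugs into hyperopt: fmin minimizes, so the cross-validated score is returned as a negative loss, and the caller sorts trials.results by loss to recover the best model (see the bayesopt branch in regressionModel.py above). A self-contained sketch of the same pattern with a hypothetical search space:

from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

X, y = make_classification(n_samples=300, random_state=0)

def objective(params):
    params['n_estimators'] = int(params['n_estimators'])   # quniform yields floats
    acc = cross_val_score(RandomForestClassifier(**params), X, y, cv=3).mean()
    return {'loss': -acc, 'score': acc, 'status': STATUS_OK, 'params': params}

space = {'n_estimators': hp.quniform('n_estimators', 50, 300, 50),
         'max_depth': hp.choice('max_depth', [4, 8, None])}
trials = Trials()
best = fmin(objective, space, algo=tpe.suggest, max_evals=10, trials=trials)
best_result = sorted(trials.results, key=lambda r: r['loss'])[0]   # best trial dict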
cloudServer.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import boto3 import json import time import requests import datetime import uuid import shutil from websocket import create_connection from botocore.exceptions import ClientError import tarfile from pathlib import Path, PurePosixPath from stat import S_ISDIR from fabric import Connection import time import logging class awsGPUTraining(): def __init__(self, config): local_config = {"location":{"data":"aion/data/od", "code":"", "pretrainedModel":"aion/pretrainedModels"}, "jupyter":{"header":{"Authorization":"Token f3af05d5348301997fb014f245569e872d27bb9018fd70d2"}, "portNo":"8888", "notebook_path":"aion/code/AWS_GPU_OD_Training.ipynb"}} self.serverConfig = config["server"] self.sshConfig = config["ssh"] self.log = logging.getLogger('eion') self.codeLocation = local_config["location"]["code"] self.dataLocation = local_config["location"]["data"] self.pretrainedModelLocation = local_config["location"]["pretrainedModel"] self.jupyterConfig = local_config["jupyter"] self.serverIP = "" if self.serverConfig["awsAccessKeyId"] == "" or self.serverConfig["awsSecretAccessKey"] == "": raise ValueError("Cloud server configuration is not available.") if len(self.serverConfig["InstanceIds"]) == 0 and self.serverConfig["amiId"] == "": raise ValueError("Please provide either InstanceIds or amiId in server config") self.instanceId = [] self.separate_instance = False if self.serverConfig["amiId"] != "": self.separate_instance = True else: if len(self.serverConfig["InstanceIds"]): if isinstance(self.serverConfig["InstanceIds"], list): self.instanceId = self.serverConfig["InstanceIds"] elif isinstance(self.serverConfig["InstanceIds"], str): self.instanceId = [self.serverConfig["InstanceIds"]] self.ec2_client = boto3.client(self.serverConfig["serverName"], region_name=self.serverConfig["regionName"], aws_access_key_id=self.serverConfig["awsAccessKeyId"], aws_secret_access_key=self.serverConfig["awsSecretAccessKey"]) def __sftp_exists(self, sftp, path): try: sftp.stat(path) return True except:# IOError, e: #if e.errno == errno.ENOENT: return False def __rmtree(self, sftp, remotepath, level=0): for f in sftp.listdir_attr(remotepath): rpath = str(PurePosixPath(remotepath)/f.filename) if S_ISDIR(f.st_mode): self.__rmtree(sftp, rpath, level=(level + 1)) sftp.rmdir(rpath) else: rpath = str(PurePosixPath(remotepath)/f.filename) sftp.remove(rpath) def copy_files_to_server(self, location): try: client = Connection( host=self.serverIP, user=self.sshConfig["userName"], connect_kwargs={ "key_filename": self.sshConfig["keyFilePath"], }, ) client.sudo('rm -rf {}/*'.format(self.dataLocation)) tarFile = str((PurePosixPath(self.dataLocation).parent/PurePosixPath(self.dataLocation).name).with_suffix(".tar.gz")) client.put(location+'/test.tfrecord', self.dataLocation+'/test.tfrecord') client.put(location+'/train.tfrecord', self.dataLocation+'/train.tfrecord') client.put(location+'/pipeline.config', self.dataLocation+'/pipeline.config') client.put(location+'/label_map.pbtxt', 
self.dataLocation+'/label_map.pbtxt')
                client.put(location+'/model.config', self.dataLocation+'/model.config')
                if self.jupyterConfig != "":
                    client.run("touch {}".format(self.dataLocation+'/log.txt'))
        except Exception as e:
            raise ValueError("Error in copying data to cloud server. " + str(e))

    def __myexec(self, ssh, cmd, timeout, want_exitcode=False):
        # one channel per command
        stdin, stdout, stderr = ssh.exec_command(cmd)
        # get the shared channel for stdout/stderr/stdin
        channel = stdout.channel
        # we do not need stdin
        stdin.close()
        # indicate that we're not going to write to that channel anymore
        channel.shutdown_write()
        # read stdout/stderr in order to prevent read block hangs;
        # decode immediately because channel.recv() returns bytes and ''.join() below expects str
        stdout_chunks = []
        stdout_chunks.append(stdout.channel.recv(len(stdout.channel.in_buffer)).decode('utf-8', errors='replace'))
        # chunked read to prevent stalls
        while not channel.closed or channel.recv_ready() or channel.recv_stderr_ready():
            # stop if channel was closed prematurely, and there is no data in the buffers.
            got_chunk = False
            readq, _, _ = select.select([stdout.channel], [], [], timeout)
            for c in readq:
                if c.recv_ready():
                    stdout_chunks.append(stdout.channel.recv(len(c.in_buffer)).decode('utf-8', errors='replace'))
                    got_chunk = True
                if c.recv_stderr_ready():
                    # make sure to read stderr to prevent stall
                    stderr.channel.recv_stderr(len(c.in_stderr_buffer))
                    got_chunk = True
            '''
            1) make sure that there are at least 2 cycles with no data in the input buffers in order to not exit too early (i.e. cat on a >200k file).
            2) if no data arrived in the last loop, check if we already received the exit code
            3) check if input buffers are empty
            4) exit the loop
            '''
            if not got_chunk \
                and stdout.channel.exit_status_ready() \
                and not stderr.channel.recv_stderr_ready() \
                and not stdout.channel.recv_ready():
                # indicate that we're not going to read from this channel anymore
                stdout.channel.shutdown_read()
                # close the channel
                stdout.channel.close()
                break    # exit as remote side is finished and our buffers are empty
        # close all the pseudofiles
        stdout.close()
        stderr.close()
        if want_exitcode:
            # exit code is always ready at this point
            return (''.join(stdout_chunks), stdout.channel.recv_exit_status())
        return ''.join(stdout_chunks)

    def __myexec1(self, ssh, cmd, timeout, want_exitcode=False):
        # one channel per command
        stdin, stdout, stderr = ssh.exec_command(cmd, get_pty=True)
        for line in iter(stderr.readline, ""):
            print(line, end="")
        stdin.close()
        stdout.close()
        stderr.close()

    def executeCode(self):
        try:
            client = Connection(
                host=self.serverIP,
                user=self.sshConfig["userName"],
                connect_kwargs={
                    "key_filename": self.sshConfig["keyFilePath"],
                },
            )
            cmd = 'python3.8 {} {} {}'.format(self.codeLocation, self.dataLocation, self.pretrainedModelLocation)
            client.run(cmd)
        except Exception as e:
            raise ValueError("Error in running code on cloud server. " + str(e))

    def start_executing_notebook(self):
        try:
            publicIp_Port = self.serverIP + ":" + self.jupyterConfig["portNo"]
            conURL = "ws://" + publicIp_Port
            base = 'http://' + publicIp_Port
            headers = self.jupyterConfig["header"]
            url = base + '/api/kernels'
            flag = True
            while flag:    # deadlock risk: there is no timeout/retry if the kernel endpoint never responds
                response = requests.post(url, headers=headers)
                flag = False
            kernel = json.loads(response.text)
            # Load the notebook and get the code of each cell
            url = base + '/api/contents/' + self.jupyterConfig["notebook_path"]
            response = requests.get(url, headers=headers)
            file = json.loads(response.text)
            code = [c['source'] for c in file['content']['cells'] if len(c['source']) > 0 and c['cell_type'] == 'code']
            ws = create_connection(conURL + "/api/kernels/" + kernel["id"] + "/channels", header=headers)
            def send_execute_request(code):
                msg_type = 'execute_request'
                content = {'code': code, 'silent': False}
                hdr = {'msg_id': uuid.uuid1().hex,
                       'username': 'test',
                       'session': uuid.uuid1().hex,
                       'date': datetime.datetime.now().isoformat(),    # the Jupyter message header field is 'date'
                       'msg_type': msg_type,
                       'version': '5.0'}
                msg = {'header': hdr, 'parent_header': hdr, 'metadata': {}, 'content': content}
                return msg
            for c in code:
                ws.send(json.dumps(send_execute_request(c)))
            # We ignore all the other messages, we just get the code execution output
            # (this needs to be improved for production to take into account errors, large cell output, images, etc.)
            error_msg = ''
            traceback_msg = ''
            for i in range(0, len(code)):
                msg_type = ''
                while msg_type != "stream":
                    rsp = json.loads(ws.recv())
                    msg_type = rsp["msg_type"]
                    if msg_type == 'error':
                        raise ValueError("Error on Cloud machine: " + rsp['content']['evalue'])
            ws.close()
            self.log.info('Status:- |... Execution Started')
        except ClientError as e:
            raise ValueError(e)

    def __wait_for_completion(self, sftp, remoteLogFile, localLogFile):
        waiting = True
        error_msg = ""
        while waiting:
            time.sleep(5 * 60)
            try:
                sftp.get(str(remoteLogFile), str(localLogFile))
                with open(localLogFile, "r") as f:
                    content = f.readlines()
                    for x in content:
                        if "Error" in x:
                            waiting = False
                            error_msg = x
                        if "success" in x:
                            waiting = False
            except Exception as e:    # previously a bare 'except' that raised an undefined 'e' as a plain string
                raise ValueError("Error in reading remote log file. " + str(e))
        return error_msg

    def copy_file_from_server(self, localPath):
        try:
            client = Connection(
                host=self.serverIP,
                user=self.sshConfig["userName"],
                connect_kwargs={
                    "key_filename": self.sshConfig["keyFilePath"],
                },
            )
            remoteLogFile = PurePosixPath(self.dataLocation)/'log.txt'
            localLogFile = Path(localPath)/'remote_log.txt'
            client.get(str(remoteLogFile), str(localLogFile))
            tarFile = (PurePosixPath(self.dataLocation).parent/PurePosixPath(self.dataLocation).name).with_suffix(".tar.gz")
            client.get(str(tarFile), str(Path(localPath)/tarFile.name))
        except:
            raise
        return str(Path(localPath)/tarFile.name)

    def create_instance(self):
        instances = self.ec2_client.run_instances(
            ImageId=self.serverConfig["amiId"],
            MinCount=1,
            MaxCount=1,
            InstanceType="t2.xlarge",
            KeyName="AION_GPU",
            SecurityGroupIds=["sg-02c3a6c8dd67edb74"]
        )
        self.instanceId = [instances['Instances'][0]['InstanceId']]

    def start_instance(self):
        if self.separate_instance:
            self.create_instance()
        try:
            response = self.ec2_client.start_instances(InstanceIds=self.instanceId, DryRun=True)
        except Exception as e:
            if 'DryRunOperation' not in str(e):
                raise ValueError("Error in starting the EC2 instance, check server configuration. " + str(e))
        try:
            running_state_code = 16
            response = self.ec2_client.start_instances(InstanceIds=self.instanceId, DryRun=False)
            instance_status_code = 0
            while instance_status_code != running_state_code:
                response = self.ec2_client.describe_instances(InstanceIds=self.instanceId)
                instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code']
                if instance_status_code == running_state_code:
                    self.serverIP = response['Reservations'][0]['Instances'][0]['PublicIpAddress']
                    break
        except ClientError as e:
            raise ValueError("Error in starting the EC2 instance. " + str(e))

    def terminate_instance(self):
        ec2 = boto3.resource(self.serverConfig["serverName"], region_name=self.serverConfig["regionName"],
                             aws_access_key_id=self.serverConfig["awsAccessKeyId"],
                             aws_secret_access_key=self.serverConfig["awsSecretAccessKey"])
        ec2.instances.filter(InstanceIds=self.instanceId).terminate()    # for terminating an ec2 instance

    def stop_server_instance(self):
        try:
            self.ec2_client.stop_instances(InstanceIds=self.instanceId, DryRun=True)
        except Exception as e:
            if 'DryRunOperation' not in str(e):
                raise
        stopped_state_code = 80
        # Dry run succeeded, call stop_instances without dryrun
        try:
            response = self.ec2_client.stop_instances(InstanceIds=self.instanceId, DryRun=False)
            response = self.ec2_client.describe_instances(InstanceIds=self.instanceId)
            instance_status_code = 0
            while instance_status_code != stopped_state_code:
                response = self.ec2_client.describe_instances(InstanceIds=self.instanceId)
                instance_status_code = response['Reservations'][0]['Instances'][0]['State']['Code']
                if instance_status_code == stopped_state_code:
                    break
        except:
            raise ValueError("Error in stopping the EC2 instance {}. Please stop it manually.".format(self.instanceId[0]))
        if self.separate_instance:
            try:
                self.terminate_instance()
            except:
                raise ValueError("Error in terminating the EC2 instance {}. Please terminate it manually.".format(self.instanceId[0]))
classificationModel.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import time import os import sys import numpy as np from numpy import arange from numpy import argmax import json from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.metrics import recall_score from sklearn.metrics import precision_score from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.linear_model import SGDClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score from sklearn.metrics import f1_score from sklearn.svm import SVC from xgboost import XGBClassifier from lightgbm import LGBMClassifier from catboost import CatBoostClassifier from sklearn.preprocessing import binarize from learner.optimizetechnique import OptimizationTq from learner.defaultAlgos import defaultParams from learner.parameters import parametersDefine from hyperopt import fmin, tpe, hp, STATUS_OK, Trials import logging from learner.aion_matrix import aion_matrix import mlflow from pathlib import Path from uncertainties.aionUQ import aionUQ # apply threshold to positive probabilities to create labels def to_labels(pos_probs, threshold): return (pos_probs >= threshold).astype('int') class ClassifierModel(): def __init__(self,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,modelType,MakeFP0,MakeFN0,deployLocation): self.modelList =modelList self.params =params self.trainX =trainX self.X =trainX self.trainY =trainY self.testX = testX self.testY = testY self.method =method self.scoreParam=scoreParam self.cvSplit=cvSplit self.numIter=numIter self.geneticParam=geneticParam self.MakeFP0= MakeFP0 self.MakeFN0=MakeFN0 self.log = logging.getLogger('eion') self.modelType = modelType self.uq_x_train = trainX self.uq_x_test = testX self.uq_y_train = trainY self.uq_y_test = testY self.deployLocation = deployLocation self.AlgorithmNames={'Logistic Regression':'LogisticRegression','Stochastic Gradient Descent':'SGDClassifier','Naive Bayes':'GaussianNB','Support Vector Machine':'SVC','K Nearest Neighbors':'KNeighborsClassifier','Decision Tree':'DecisionTreeClassifier','Random Forest':'RandomForestClassifier','Gradient Boosting':'GradientBoostingClassifier','Extreme Gradient Boosting (XGBoost)':'XGBClassifier','Categorical Boosting (CatBoost)': 'CatBoostClassifier','Light Gradient Boosting (LightGBM)': 'LGBMClassifier','Bagging (Ensemble)':'BaggingClassifier','Stacking (Ensemble)':'StackingClassifier','Voting (Ensemble)':'VotingClassifier','Deep Q Network':'DQN','Dueling Deep Q Network':'DDQN','Neural Architecture Search':'NAS'} self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()} def check_threshold(self,estimator,testX,testY,threshold_range,checkParameter,modelName): thresholdx = -1 for threshold 
in threshold_range: predictedData = estimator.predict_proba(testX) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) p_score = precision_score(testY, predictedData) #self.log.info('-------------> Precision:'+str(p_score)) r_score = recall_score(testY, predictedData) #self.log.info('-------------> Rscore:'+str(r_score)) #self.log.info(confusion_matrix(testY, predictedData)) tn, fp, fn, tp = confusion_matrix(testY, predictedData).ravel() if(checkParameter.lower() == 'fp'): if fp == 0: if(p_score == 1): thresholdx = threshold self.log.info('---------------> Best Threshold:'+str(threshold)) self.log.info('---------------> Best Precision:'+str(p_score)) self.log.info('---------------> Best Recall:'+str(r_score)) self.log.info('---------------> TN:'+str(tn)) self.log.info('---------------> FP:'+str(fp)) self.log.info('---------------> FN:'+str(fn)) self.log.info('---------------> TP:'+str(tp)) break if(checkParameter.lower() == 'fn'): if fn == 0: if(r_score == 1): thresholdx = threshold self.log.info('---------------> Best Threshold:'+str(threshold)) self.log.info('---------------> Best Precision:'+str(p_score)) self.log.info('---------------> Best Recall:'+str(r_score)) self.log.info('---------------> TN:'+str(tn)) self.log.info('---------------> FP:'+str(fp)) self.log.info('---------------> FN:'+str(fn)) self.log.info('---------------> TP:'+str(tp)) break return(thresholdx,p_score,r_score) def getBestModel(self,fp0,fn0,threshold,bestthreshold,rscore,brscore,pscore,bpscore,tscore,btscore): cmodel = False if(threshold != -1): if(bestthreshold == -1): cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif fp0: if rscore > brscore: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif rscore == brscore: if tscore > btscore or btscore == -0xFFFF: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif fn0: if pscore > bpscore: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif pscore == bpscore: if tscore > btscore or btscore == -0xFFFF: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore else: if tscore > btscore or btscore == -0xFFFF: cmodel = True btscore = tscore else: if(bestthreshold == -1): if tscore > btscore or btscore == -0xFFFF: cmodel = True btscore = tscore return cmodel,btscore,bestthreshold,brscore,bpscore def logMlflow(self, runName, params, metrices, estimator, algoName=None): with mlflow.start_run(run_name = runName): for k,v in params.items(): mlflow.log_param(k, v) for k,v in metrices.items(): mlflow.log_metric(k, v) if algoName == 'CatBoostClassifier': mlflow.catboost.log_model(estimator, "model") else: mlflow.sklearn.log_model(estimator, "model") model_uri = mlflow.get_artifact_uri("model") """ for some dataset evaluate takes more than 90 min, so commenting till some solution is not found evaluate_data = self.testX.copy() evaluate_data['label'] = self.testY.copy() mlflow.evaluate(model_uri, data=evaluate_data, targets='label', model_type="classifier") del evaluate_data """ def classModelling(self, modelOrFeatureBased,code_configure): paramObj=parametersDefine() bestModel='None' bestParams={} bestScore=-0xFFFF bestEstimator = 'None' bestpipelineModel='None' scoredetails = '' threshold = -1 bestthreshold = -1 precisionscore =-1 bestprecisionscore=-1 recallscore = -1 bestrecallscore=-1 self.log.info('\n---------- ClassifierModel has started 
----------') objClf = aion_matrix() try: self.log.info('Status:- |... Search Optimization Method applied: '+self.method) for modelName in self.modelList: if modelName in ['Bagging (Ensemble)','Voting (Ensemble)','Stacking (Ensemble)','Dueling Deep Q Network','Deep Q Network','Neural Architecture Search']: if modelName == 'Bagging (Ensemble)': from ensemble.ensemble_bagging import ensemble_bagging ensemble_bagging_obj = ensemble_bagging(self.params[modelName],self.scoreParam,self.MakeFP0,self.MakeFN0) estimator,modelParams,score,model,threshold,precisionscore,recallscore = ensemble_bagging_obj.ensemble_bagging_classifier(self.trainX,self.trainY,self.testX,self.testY) if modelName == 'Stacking (Ensemble)': from ensemble.ensemble_stacking import ensemble_stacking ensemble_stacking_obj = ensemble_stacking(self.params[modelName],self.scoreParam) estimator,modelParams,score,model,threshold,precisionscore,recallscore = ensemble_stacking_obj.ensemble_stacking_classifier(self.trainX,self.trainY,self.testX,self.testY,self.MakeFP0,self.MakeFN0,self.modelList) if modelName == 'Voting (Ensemble)': from ensemble.ensemble_voting import ensemble_voting ensemble_voting_obj = ensemble_voting("",self.scoreParam) #bug 12437 status,estimator,modelParams,score,model,threshold,precisionscore,recallscore = ensemble_voting_obj.ensemble_voting_classifier(self.trainX,self.trainY,self.testX,self.testY,self.MakeFP0,self.MakeFN0,self.modelList) if status != "SUCCESS": #bug 12437 continue if modelName == 'Deep Q Network': from reinforcement.DRL_train import ReinformentLearning rlObj = ReinformentLearning(self.params[modelName],self.scoreParam,'Classification') estimator,modelParams,score,model,threshold,precisionscore,recallscore = rlObj.TrainRL(self.trainX,self.trainY,self.testX,self.testY,'DQN',self.deployLocation) if modelName == 'Dueling Deep Q Network': from reinforcement.DRL_train import ReinformentLearning rlObj = ReinformentLearning(self.params[modelName],self.scoreParam,'Classification') estimator,modelParams,score,model,threshold,precisionscore,recallscore = rlObj.TrainRL(self.trainX,self.trainY,self.testX,self.testY,'DDQN',self.deployLocation) ''' if modelName == 'Neural Architecture Search': from nas.aionNAS import aionNAS objNAS = aionNAS('Classification',self.params[modelName],self.trainX,self.testX,self.trainY,self.testY,self.deployLocation) estimator,modelParams,score,model,threshold,precisionscore,recallscore=objNAS.nasMain(self.scoreParam) ''' if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":"NA"}' status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) if status: bestScore =bscore bestModel =model bestParams=modelParams bestEstimator=estimator bestthreshold = bthres bestrecallscore = brscore bestprecisionscore = bpscore self.log.info('Status:- |... ML Algorithm applied: '+modelName) self.log.info('Status:- |... 
Score: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\n') continue paramSpace=self.params[modelName].copy() algoName = self.AlgorithmNames[modelName] paramDict =paramObj.paramDefine(paramSpace,self.method) if not self.method == 'bayesopt': paramSize = paramObj.getParamSpaceSize(paramDict) else: paramSize = 0 if (self.method == 'bayesopt' and not paramDict) or (not self.method == 'bayesopt' and paramSize<=0): try: start = time.time() #function call defObj = defaultParams(algoName,paramDict,self.scoreParam,self.MakeFP0, self.MakeFN0,paramSize) estimator, modelParams, model,score, threshold, precisionscore, recallscore =defObj.startTrainingClassification(self.trainX,self.trainY,self.testX,self.testY) executionTime = time.time() - start if (scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"' + self.modelToAlgoNames[model] + '","FeatureEngineering":"' + str( modelOrFeatureBased) + '","Score":' + str(score) + ',"ModelUncertainty":"NA"}' status, bscore, bthres, brscore, bpscore = self.getBestModel(self.MakeFP0, self.MakeFN0,threshold, bestthreshold,recallscore, bestrecallscore,precisionscore, bestprecisionscore,score, bestScore) self.log.info('---------> Total Execution: ' + str(executionTime) + '\n') if status: bestScore = bscore bestModel = model bestParams = modelParams bestEstimator = estimator bestthreshold = bthres bestrecallscore = brscore bestprecisionscore = bpscore self.log.info('Status:- |... ML Algorithm applied: ' + modelName) self.log.info('Status:- |... Score: ' + objClf.get_print_score(self.scoreParam) + '=' + str( round(score, 2)) + '\n') except Exception as inst: self.log.info('\n < ---------- Model Execution Failed Start--------->') self.log.info('\n<-------' + modelName + ' Model Execution failed!!!.' + str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) self.log.info('\n < ---------- Model Execution Failed End --------->') continue # call algorithms with default valuepass if self.method == 'bayesopt': code_configure.add_model(algoName,paramSpace) else: paramDictCopy = paramDict # numpy array is not json serializable #numpy is already imported but still np.ndarray raise error import numpy as np for key,value in paramDictCopy.items(): if isinstance(value, np.ndarray): paramDictCopy[key] = paramDictCopy[key].tolist() code_configure.add_model(algoName,paramDictCopy) trainingStatus = 'Success' if self.method =='grid': try: self.log.info("-------> Optimization Method :Grid Search") self.log.info("-------> Model Name: "+str(modelName)) opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) start = time.time() model,modelParams,score,estimator=opTq.gridSearchOpt() executionTime=time.time() - start if not self.testX.empty: predictedData = estimator.predict(self.testX) score = objClf.get_score(self.scoreParam,self.testY,predictedData) else: score = score*100 problemName = estimator.__class__.__name__ runName = algoName + '_' + modelOrFeatureBased metrices = {} metrices["score"] = score try: self.logMlflow(runName, modelParams, metrices, estimator, algoName) except Exception as e: self.log.info('----------> ML Flow error!!!. 
' + str(e)) # usnish pass output_jsonobject = "" problemName = estimator.__class__.__name__ self.log.info('----------> Testing Score: '+str(score)) try: if ((estimator.__class__.__name__ == "ABCMeta") or (model in ['SGDClassifier','XGBClassifier','CatBoostClassifier','LGBMClassifier']) ): self.log.info('-----> Model Uncertainty Not Supported') else: uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per=uqObj.uqMain_BBMClassification(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq") self.log.info("-------> model_confidence: "+str(model_confidence_per)+str('%')) self.log.info("-------> model_uncertainty: "+str(model_uncertainty_per)+str('%')) except: pass if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":'+str(json.dumps(output_jsonobject))+'}' self.log.info('----------> Testing Score: '+str(score)) import numpy as np if self.MakeFP0: self.log.info('-------- Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FP',algoName) self.log.info('-------- Calculate Threshold for FP End-------') if self.MakeFN0: self.log.info('-------- Calculate Threshold for FN Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FN',algoName) self.log.info('-------- Calculate Threshold for FN End-------') self.log.info('----------> Total Execution: '+str(executionTime)+'\n') status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) if status: bestScore =bscore bestModel =model bestParams=modelParams bestEstimator=estimator bestthreshold = bthres bestrecallscore = brscore bestprecisionscore = bpscore except Exception as inst: self.log.info('\n < ---------- Model Execution Failed Start--------->') self.log.info('\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) self.log.info('\n < ---------- Model Execution Failed End --------->') trainingStatus = 'Error (Exception)' elif self.method == 'random': try: self.log.info("-------> Optimization Method :Random Search") self.log.info("-------> Model Name: "+str(modelName)) start = time.time() opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) model,modelParams,score,estimator=opTq.randomSearchOpt() executionTime=time.time() - start if not self.testX.empty: predictedData = estimator.predict(self.testX) score = objClf.get_score(self.scoreParam,self.testY,predictedData) else: score = score*100 problemName = estimator.__class__.__name__ runName = algoName + '_' + modelOrFeatureBased metrices = {} metrices["score"] = score try: self.logMlflow(runName, modelParams, metrices, estimator, algoName) except Exception as e: self.log.info('----------> ML Flow error!!!. 
' + str(e)) # usnish pass import numpy as np if self.MakeFP0: self.log.info('-------- Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FP',algoName) self.log.info('-------- Calculate Threshold for FP End-------') if self.MakeFN0: self.log.info('-------- Calculate Threshold for FN Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FN',algoName) self.log.info('-------- Calculate Threshold for FN End-------') if threshold != -1: if not self.testX.empty: predictedData = estimator.predict_proba(self.testX) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) score = objClf.get_score(self.scoreParam,self.testY,predictedData) else: predictedData = estimator.predict_proba(self.trainX) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) score = objClf.get_score(self.scoreParam,self.trainY,predictedData) self.log.info('---------> Total Execution: '+str(executionTime)+'\n') output_jsonobject = "" problemName = estimator.__class__.__name__ self.log.info('----------> Testing Score: '+str(score)) try: if ((estimator.__class__.__name__ == "ABCMeta") or (model in ['SGDClassifier','XGBClassifier','CatBoostClassifier','LGBMClassifier']) ): self.log.info('-----> Model Uncertainty Not Supported') else: uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per=uqObj.uqMain_BBMClassification(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq") self.log.info("-------> model_confidence: "+str(model_confidence_per)+str('%')) self.log.info("-------> model_uncertainty: "+str(model_uncertainty_per)+str('%')) except Exception as e: pass if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":'+str(json.dumps(output_jsonobject))+'}' status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) if status: bestScore =bscore bestModel =model bestParams=modelParams bestEstimator=estimator bestthreshold = threshold bestrecallscore = recallscore bestprecisionscore = precisionscore except Exception as inst: self.log.info('\n < ---------- Model Execution Failed Start--------->') self.log.info('\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) self.log.info('\n < ---------- Model Execution Failed End --------->') trainingStatus = 'Error (Exception)' elif self.method == 'bayesopt': try: self.log.info("-------> Optimization Method :BayesOpt") self.log.info("-------> Model Name: "+str(modelName)) opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY) fun=opTq.f trials = Trials() start = time.time() best = fmin(fun,paramDict,algo=tpe.suggest, max_evals=self.numIter, 
trials=trials) executionTime=time.time() - start results = sorted(trials.results, key = lambda x: x['loss']) bestresult=results[0] model=bestresult['model'] score=bestresult['score'] modelParams=bestresult['params'] executionTime=time.time() - start res = ', '.join("{!s}={!r}".format(key,val) for (key,val) in modelParams.items()) modelObj=eval(model+'('+res+')') estimator = modelObj.fit(self.trainX,self.trainY) if not self.testX.empty: predictedData = estimator.predict(self.testX) score = objClf.get_score(self.scoreParam,self.testY,predictedData) problemName = estimator.__class__.__name__ runName = algoName + '_' + modelOrFeatureBased metrices = {} metrices["score"] = score try: self.logMlflow(runName, modelParams, metrices, estimator, algoName) except Exception as e: self.log.info('----------> ML Flow error!!!. ' + str(e)) # usnish pass output_jsonobject = "" problemName = estimator.__class__.__name__ self.log.info('----------> Testing Score: '+str(score)) try: if ((estimator.__class__.__name__ == "ABCMeta") or (model in ['SGDClassifier','XGBClassifier','CatBoostClassifier','LGBMClassifier']) ): self.log.info('-----> Model Uncertainty Not Supported') else: uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation) accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per=uqObj.uqMain_BBMClassification(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq") self.log.info("-------> model_confidence: "+str(model_confidence_per)+str('%')) self.log.info("-------> model_uncertainty: "+str(model_uncertainty_per)+str('%')) except: pass if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":'+str(json.dumps(output_jsonobject))+'}' ''' test_accuracy = accuracy_score(self.testY,predictedData) test_precision = precision_score(self.testY,predictedData,average='macro') self.log.info('---------> Test Accuracy: '+str(test_accuracy)) self.log.info('---------> Test Precision: '+str(test_precision)) ''' import numpy as np if self.MakeFP0: self.log.info('-------- Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = self.check_threshold(estimator,self.testX,self.testY,threshold_range,'FP',algoName) self.log.info('-------- Calculate Threshold for FP End-------') if self.MakeFN0: self.log.info('-------- Calculate Threshold for FN Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = self.check_threshold(estimator,self.testX,self.testY,threshold_range,'FN',algoName) self.log.info('-------- Calculate Threshold for FN End-------') self.log.info('---------> Total Execution: '+str(executionTime)+'\n') status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) if status: bestScore =score bestModel =model bestParams=modelParams res = ', '.join("{!s}={!r}".format(key,val) for (key,val) in bestParams.items()) modelObj=eval(bestModel+'('+res+')') bestEstimator=estimator bestthreshold = threshold bestrecallscore = recallscore bestprecisionscore = precisionscore except Exception as inst: self.log.info('\n < ---------- Model Execution Failed 
Start--------->') self.log.info('\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst)) self.log.info('\n < ---------- Model Execution Failed End --------->') trainingStatus = 'Error (Exception)' else: trainingStatus = 'Error (HyperTunning Algo Not Supported)' pass self.log.info('Status:- |... ML Algorithm applied: '+modelName) if trainingStatus.lower() == 'success': self.log.info('Status:- |... Score after hyperparameter tuning: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\n') else: self.log.info('Status:- |... Training Error : '+trainingStatus+'\n') self.log.info('---------- ClassifierModel End ---------- \n') if bestModel != 'None': self.log.info('\n------- Best Model and its parameters -------------') self.log.info('Status:- |... Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2))) self.log.info("-------> Best Name: "+str(bestModel)) self.log.info("-------> Best Score: "+str(bestScore)) return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails,bestthreshold,bestprecisionscore,bestrecallscore else: raise Exception("Sorry, no model is trained") except Exception as inst: self.log.info( '\n-----> ClassifierModel failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
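# A minimal sketch of the threshold sweep that check_threshold() above performs:
# walk probability cutoffs and keep the first one that drives the chosen error
# (here false positives) to zero. The dataset is synthetic and purely illustrative.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, precision_score, recall_score

X, y = make_classification(n_samples=400, class_sep=1.5, random_state=7)
clf = LogisticRegression().fit(X, y)
probs = clf.predict_proba(X)[:, 1]

best_threshold = -1
for threshold in np.arange(0.0, 1.0, 0.01):
    pred = (probs >= threshold).astype(int)           # same idea as to_labels()
    tn, fp, fn, tp = confusion_matrix(y, pred).ravel()
    if fp == 0 and precision_score(y, pred) == 1:     # FP forced to zero
        best_threshold = threshold
        print('threshold=%.2f precision=%.2f recall=%.2f'
              % (threshold, precision_score(y, pred), recall_score(y, pred)))
        break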
aion_matrix.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error, make_scorer
from sklearn.metrics import mean_squared_error
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
import logging
import numpy as np
from sklearn.preprocessing import binarize
from sklearn.preprocessing import LabelBinarizer

class aion_matrix:
    def __init__(self):
        self.log = logging.getLogger('eion')

    def get_print_score(self, matrix):
        if 'accuracy' in str(matrix).lower():
            return 'Accuracy'
        elif 'recall' in str(matrix).lower():
            return 'Recall'
        elif 'precision' in str(matrix).lower():
            return 'Precision'
        elif 'f1_score' in str(matrix).lower():
            return 'F1_Score'
        elif 'roc_auc' in str(matrix).lower():
            return 'ROC_AUC'
        elif 'mse' in str(matrix).lower() or 'neg_mean_squared_error' in str(matrix).lower():
            return 'Mean Squared Error(MSE)'
        elif 'rmse' in str(matrix).lower() or 'neg_root_mean_squared_error' in str(matrix).lower():
            return 'Root Mean Squared Error(RMSE)'
        elif 'mae' in str(matrix).lower() or 'neg_mean_absolute_error' in str(matrix).lower():
            return 'Mean Absolute Error (MAE)'
        elif 'r2' in str(matrix).lower():
            return 'R-Squared(R2)'
        else:
            return 'Unknown'

    def get_score(self, matrix, actual, predict):
        if 'accuracy' in str(matrix).lower():
            ensemble_score = accuracy_score(actual, predict)
            ensemble_score = ensemble_score * 100
        elif 'recall' in str(matrix).lower():
            ensemble_score = recall_score(actual, predict, average='macro')
            ensemble_score = ensemble_score * 100
        elif 'precision' in str(matrix).lower():
            ensemble_score = precision_score(actual, predict, average='macro')
            ensemble_score = ensemble_score * 100
        elif 'f1_score' in str(matrix).lower():
            ensemble_score = f1_score(actual, predict, average='macro')
            ensemble_score = ensemble_score * 100
        elif 'roc_auc' in str(matrix).lower():
            try:
                ensemble_score = roc_auc_score(actual, predict, average="macro")
            except:
                try:
                    actual = pd.get_dummies(actual)
                    predict = pd.get_dummies(predict)
                    ensemble_score = roc_auc_score(actual, predict, average='weighted', multi_class='ovr')
                except:
                    ensemble_score = 0
            ensemble_score = ensemble_score * 100
        elif ('mse' in str(matrix).lower()) or ('neg_mean_squared_error' in str(matrix).lower()):
            ensemble_score = mean_squared_error(actual, predict)
        elif ('rmse' in str(matrix).lower()) or ('neg_root_mean_squared_error' in str(matrix).lower()):
            ensemble_score = mean_squared_error(actual, predict, squared=False)
        elif ('mae' in str(matrix).lower()) or ('neg_mean_absolute_error' in str(matrix).lower()):
ensemble_score=mean_absolute_error(actual,predict) elif 'r2' in str(matrix).lower(): ensemble_score=r2_score(actual,predict) return round(ensemble_score,2) def getClassificationPerformaceMatrix(self,le_trainY,predictedData,labelMaps): setOfyTrue = set(le_trainY) unqClassLst = list(setOfyTrue) if(str(labelMaps) != '{}'): inv_mapping_dict = {v: k for k, v in labelMaps.items()} unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict) unqClassLst2 = list(unqClassLst2) else: unqClassLst2 = unqClassLst indexName = [] columnName = [] targetnames=[] for item in unqClassLst2: indexName.append("act:"+str(item)) columnName.append("pre:"+str(item)) targetnames.append(str(item)) matrixconfusion = pd.DataFrame(confusion_matrix(le_trainY,predictedData, labels = unqClassLst),index = indexName, columns = columnName) #pd.set_option('expand_frame_repr', False) pd.set_option('display.max_columns',len(targetnames)+2) self.log.info('-------> Confusion Matrix: ') self.log.info(matrixconfusion) pd.reset_option('display.max_columns') #pd.reset_option('expand_frame_repr') #self.log.info('-------> Confusion Matrix With Labels: ') #self.log.info(confusion_matrix(le_trainY,predictedData, labels = unqClassLst)) #print(unqClassLst2) classificationreport = pd.DataFrame(classification_report(le_trainY, predictedData, labels = unqClassLst,target_names=targetnames,output_dict=True)).transpose() self.log.info('-------> Classification Report: ') self.log.info(classificationreport) lb = LabelBinarizer() lb.fit(le_trainY) transformTarget= lb.transform(le_trainY) transformPredict = lb.transform(predictedData) rocaucscore = roc_auc_score(transformTarget,transformPredict,average="macro") self.log.info('-------> ROC AUC SCORE :'+str(rocaucscore)) matrixconfusion = matrixconfusion.to_json(orient='index') classificationreport = classificationreport.to_json(orient='index') matrix = '"ConfusionMatrix":'+matrixconfusion+',"ClassificationReport":'+classificationreport+',"ROC_AUC_SCORE":'+str(rocaucscore) return(matrix) def get_regression_matrix(self,targetData,predictedData): r2score=r2_score(targetData, predictedData) self.log.info('-------> R2_score :'+str(r2score)) meanabsoluteerror=(mean_absolute_error(targetData, predictedData)) self.log.info('-------> MAE :'+str(meanabsoluteerror)) meanssquatederror=mean_squared_error(targetData, predictedData) self.log.info('-------> MSE :'+str(meanssquatederror)) rootmeanssquatederror=mean_squared_error(targetData, predictedData,squared=False) self.log.info('-------> RMSE :'+str(rootmeanssquatederror)) targetArray, predictedArray = np.array(targetData), np.array(predictedData) try: EPSILON = 1e-10 meanpercentageerror=np.mean(np.abs((targetArray - predictedArray) / (targetArray+EPSILON)))*100 except ZeroDivisionError: meanpercentageerror = 0 self.log.info('-------> MAPE :'+str(meanpercentageerror)) try: normalised_rmse_percentage = round(((rootmeanssquatederror/ ( np.max(targetData) - np.min(targetData) )) * 100), 4) except Exception as e: normalised_rmse_percentage = -1 self.log.info('-------> Normalised RMSE percentage :'+str(normalised_rmse_percentage)) matrix = '"MAE":'+str(meanabsoluteerror)+',"R2Score":'+str(r2score)+',"MSE":'+str(meanssquatederror)+',"MAPE":'+str(meanpercentageerror)+',"RMSE":'+str(rootmeanssquatederror)+',"Normalised RMSE(%)":'+str(normalised_rmse_percentage) return matrix def getbestfeatureModel(self,modelType,scoreParam,score1,score2,model1,model2,threshold1,pscore1,rscore1,threshold2,pscore2,rscore2): best_feature_model = 'Model1' self.log.info('\n ---------- 
Summary Start ------------') if modelType.lower() == "classification": if(threshold1 == -1 and threshold2 == -1): if score1> score2: self.log.info('-------> Best Features: Model1') self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model1' else: self.log.info('-------> Best Features: Model2') self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model2' elif(threshold1 == -1): self.log.info('-------> Best Features: Model2') self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model2' elif(threshold1 == -2): self.log.info('-------> Best Features: Model1') self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model1' else: if pscore1 == pscore2: if rscore1 > rscore2: self.log.info('-------> Best Features: Model1') self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model1' else: self.log.info('-------> Best Features: Model2') self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model2' elif rscore1 == rscore2: if pscore1 > pscore2: self.log.info('-------> Best Features: Model1') self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model1' else: self.log.info('-------> Best Features: Model2') self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model2' elif modelType.lower() == "regression": if scoreParam == "r2" or scoreParam == "explained_variance": if score1> score2 : self.log.info('-------> Best Features: Model1') self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model1' else: self.log.info('-------> Best Features: Model2') self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model2' else: if score1< score2 : self.log.info('-------> Best Features: Model1') self.log.info('-------> Best Model: '+str(model1)) self.log.info('-------> Best Score: '+str(score1)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model1' else: self.log.info('-------> Best Features: Model2') self.log.info('-------> Best Model: '+str(model2)) self.log.info('-------> Best Score: '+str(score2)) self.log.info('-------> Scoring Param: '+str(scoreParam)) best_feature_model = 'Model2' self.log.info('---------- Summary End ------------\n') return(best_feature_model) def check_threshold(self,estimator,testX,testY,threshold_range,checkParameter,modelName): thresholdx = -1 for threshold in threshold_range: predictedData = 
estimator.predict_proba(testX) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold)#bug 12437 p_score = precision_score(testY, predictedData) #self.log.info('-------------> Precision:'+str(p_score)) r_score = recall_score(testY, predictedData) #self.log.info('-------------> Rscore:'+str(r_score)) #self.log.info(confusion_matrix(testY, predictedData)) tn, fp, fn, tp = confusion_matrix(testY, predictedData).ravel() if(checkParameter.lower() == 'fp'): if fp == 0: if(p_score == 1): thresholdx = threshold self.log.info('---------------> Best Threshold:'+str(threshold)) self.log.info('---------------> Best Precision:'+str(p_score)) self.log.info('---------------> Best Recall:'+str(r_score)) self.log.info('---------------> TN:'+str(tn)) self.log.info('---------------> FP:'+str(fp)) self.log.info('---------------> FN:'+str(fn)) self.log.info('---------------> TP:'+str(tp)) break if(checkParameter.lower() == 'fn'): if fn == 0: if(r_score == 1): thresholdx = threshold self.log.info('---------------> Best Threshold:'+str(threshold)) self.log.info('---------------> Best Precision:'+str(p_score)) self.log.info('---------------> Best Recall:'+str(r_score)) self.log.info('---------------> TN:'+str(tn)) self.log.info('---------------> FP:'+str(fp)) self.log.info('---------------> FN:'+str(fn)) self.log.info('---------------> TP:'+str(tp)) break return(thresholdx,p_score,r_score) def getBestModel(self,fp0,fn0,threshold,bestthreshold,rscore,brscore,pscore,bpscore,tscore,btscore): cmodel = False if(threshold != -1): if(bestthreshold == -1): cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif fp0: if rscore > brscore: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif rscore == brscore: if tscore > btscore or btscore == -0xFFFF: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif fn0: if pscore > bpscore: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif pscore == bpscore: if tscore > btscore or btscore == -0xFFFF: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore else: if tscore > btscore or btscore == -0xFFFF: cmodel = True btscore = tscore else: if(bestthreshold == -1): if tscore > btscore or btscore == -0xFFFF: cmodel = True btscore = tscore return cmodel,btscore,bestthreshold,brscore,bpscore
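# A minimal usage sketch for aion_matrix: get_score() returns classification
# metrics scaled to 0-100 and regression errors on their natural scale; the
# label vectors below are illustrative.
if __name__ == '__main__':
    matrix_obj = aion_matrix()
    actual = [0, 1, 1, 0, 1]
    predicted = [0, 1, 0, 0, 1]
    # prints 'Accuracy 80.0' -> four of the five labels match
    print(matrix_obj.get_print_score('accuracy'),
          matrix_obj.get_score('accuracy', actual, predicted))
    # prints 'Mean Squared Error(MSE) 0.2' -> one squared error of 1 over 5 samples
    print(matrix_obj.get_print_score('neg_mean_squared_error'),
          matrix_obj.get_score('neg_mean_squared_error', actual, predicted))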
ImageLearning.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import tensorflow from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout, Flatten from tensorflow.keras.layers import Conv2D, MaxPooling2D from tensorflow.keras.utils import to_categorical from tensorflow.keras.preprocessing import image import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from tensorflow.keras.layers import Input from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam from tensorflow.keras.applications import VGG16 from tensorflow.keras.callbacks import EarlyStopping import logging from sklearn.preprocessing import LabelEncoder from statistics import mean import sys from learner.machinelearning import machinelearning from learner.aion_matrix import aion_matrix from profiler.imageAug import ImageAugmentation from pathlib import Path class ImageLearning: def __init__(self,dataFrame,input_directory,outputdir,modelname,hyperParam, AugEnabled,keepAugImages,operations,augConf): self.image_list = dataFrame self.input_directory = input_directory self.outputdir = outputdir self.modelname = modelname self.hyperParam = hyperParam self.labelMapping={} self.log = logging.getLogger('eion') self.AIONNumericDtypes=['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] self.AugEnabled = AugEnabled self.keepAugImages = keepAugImages self.operations = operations self.augConf = augConf def TrainCAST(self,predicted_data_file): datatype = self.image_list['Label'].dtypes if datatype not in self.AIONNumericDtypes: labelEncode= LabelEncoder() self.image_list['Label'] = self.image_list['Label'].apply(str) self.image_list['Label'] = labelEncode.fit_transform(self.image_list['Label']) self.labelMapping = dict(zip(labelEncode.classes_, labelEncode.transform(labelEncode.classes_))) self.log.info('\n-------> First Ten Rows of Input Data After Encoding: ') self.log.info(self.image_list.head(10)) self.log.info('Status:- |... 
Target Feature Encoding Done') if not os.path.exists(self.outputdir): os.makedirs(self.outputdir) train_df, test_df = train_test_split(self.image_list, random_state=42, test_size=self.hyperParam['test_split_ratio']) if self.AugEnabled: csv_file = "tempTrainDf.csv" train_df.to_csv(csv_file, index=False) ia = ImageAugmentation(self.input_directory, csv_file) csv_file = ia.augment("imageclassification", self.operations,None,self.augConf) train_df = pd.read_csv(csv_file) Path(csv_file).unlink() train_image = [] train_df.reset_index(drop=True, inplace=True) for i in range(train_df.shape[0]): #print(os.path.join(self.input_directory,str(self.image_list['File'][i]))) img = image.load_img(os.path.join(self.input_directory,str(train_df['File'][i])), target_size=(self.hyperParam['img_width'],self.hyperParam['img_height'],self.hyperParam['img_channel']), grayscale=False) img = image.img_to_array(img) img = img/255 train_image.append(img) test_image = [] test_df.reset_index(drop=True, inplace=True) for i in range(test_df.shape[0]): #print(os.path.join(self.input_directory,str(self.image_list['File'][i]))) img = image.load_img(os.path.join(self.input_directory,str(test_df['File'][i])), target_size=(self.hyperParam['img_width'],self.hyperParam['img_height'],self.hyperParam['img_channel']), grayscale=False) img = image.img_to_array(img) img = img/255 test_image.append(img) self.log.info('Status:- |... Image Loading Done') X_train = np.array(train_image) y_train = train_df['Label'] X_test = np.array(test_image) y_test = test_df['Label'] ytrain = y_train.values ytrain = to_categorical(ytrain) ytest = y_test.values ytest = to_categorical(ytest) #print(y) self.log.info("Loading Imagenet Weights...") if self.modelname == "densenet": self.log.info('Loading Densenet model') baseModel = tensorflow.keras.applications.DenseNet121(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.hyperParam['img_width'], self.hyperParam['img_height'], self.hyperParam['img_channel']))) #98 elif self.modelname == "inception": self.log.info('Loading Inception model') baseModel = tensorflow.keras.applications.InceptionV3(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.hyperParam['img_width'], self.hyperParam['img_height'], self.hyperParam['img_channel']))) #97 headModel = baseModel.output headModel = Flatten(name="flatten")(headModel) headModel = Dense(1024, activation='relu')(headModel) headModel = Dropout(0.5)(headModel) headModel = Dense(2, activation='sigmoid')(headModel) model = Model(inputs=baseModel.input, outputs=headModel) self.log.info("[INFO] compiling model...") opt = Adam(lr=self.hyperParam['lr']) model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"]) #early_stop = EarlyStopping(monitor='val_loss',patience=2) #history = model.fit(X_train, y_train, epochs=hyperparam_config['epochs'], validation_data=(X_test, y_test), callbacks=[early_stop]) history = model.fit(X_train, ytrain, epochs=self.hyperParam['epochs'], validation_data=(X_test, ytest)) self.log.info('Status:- |... 
Image Classification Algorithm applied:'+str(self.modelname)) #Saving trained model weights model.save_weights(os.path.join(self.outputdir, self.modelname)) saved_model = self.modelname modelname = self.modelname prediction = model.predict(X_train) predictedData = np.argmax(prediction,axis=1) mlobj = machinelearning() self.log.info('\n--------- Performance Matrix with Train Data ---------') trainingperformancematrix = mlobj.getClassificationPerformaceMatrix(y_train, predictedData,self.labelMapping) prediction = model.predict(X_test) predictedData = np.argmax(prediction,axis=1) self.log.info('\n--------- Performance Matrix with Test Data ---------') performancematrix = mlobj.getClassificationPerformaceMatrix(y_test, predictedData,self.labelMapping) df_test = pd.DataFrame() df_test['actual'] = y_test df_test['predict'] = predictedData df_test.to_csv(predicted_data_file) objClf = aion_matrix() scoring_param = 'Accuracy' score = objClf.get_score(scoring_param,y_test,predictedData) #score = mean(history.history['accuracy']) if self.AugEnabled and not self.keepAugImages: ia.removeAugmentedImages(train_df) scoredetails = '{"Model":"'+modelname+'","Score":'+str(round(score,2))+'}' self.log.info('Status:- |... Score Accuracy: '+str(round(score,2))) return saved_model,modelname,'ImageClassification',scoring_param,score,scoredetails,self.labelMapping,trainingperformancematrix,performancematrix
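# A minimal sketch of scoring one image with a model trained by TrainCAST()
# above: the image is loaded, scaled to [0, 1] and batched exactly as during
# training. The path and target size below are illustrative; pass the same
# (width, height) values used for training.
import numpy as np
from tensorflow.keras.preprocessing import image

def predict_one(model, img_path, img_width=224, img_height=224):
    img = image.load_img(img_path, target_size=(img_width, img_height))
    arr = image.img_to_array(img) / 255.0     # same scaling used in training
    batch = np.expand_dims(arr, axis=0)       # the model expects a batch axis
    probs = model.predict(batch)
    return int(np.argmax(probs, axis=1)[0])   # class index, see labelMapping

# e.g. label_index = predict_one(model, 'sample.jpg')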
image_eda.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import warnings
warnings.simplefilter("ignore")
import os
import numpy as np
from numpy import asarray
import cv2
import sys
import random
import glob as glob
import math as m
# for the gamma function, imported via scipy.special
from scipy.special import gamma as tgamma
import matplotlib.image as mpimg
import skimage
from libsvm import svmutil, svm
# import through the libsvm package; bare 'svmutil'/'svm' top-level modules are not installed by it
from libsvm.svmutil import *
from libsvm.svm import *
from PIL import Image
from collections import Counter
from imutils import paths
import matplotlib.pyplot as plt
import json

###################################################################################
# Input - AGGD fit model, takes input as the MSCN Image / Pair-wise Product
# Output - best values of image parameters
# Definition - used as internal method of measure_ImageQualityScore
###################################################################################
def AGGDfit(structdis):
    # variables to count positive pixels / negative pixels and their squared sum
    poscount = 0
    negcount = 0
    possqsum = 0
    negsqsum = 0
    abssum = 0
    poscount = len(structdis[structdis > 0])    # number of positive pixels
    negcount = len(structdis[structdis < 0])    # number of negative pixels
    # calculate squared sum of positive pixels and negative pixels
    possqsum = np.sum(np.power(structdis[structdis > 0], 2))
    negsqsum = np.sum(np.power(structdis[structdis < 0], 2))
    # absolute squared sum
    abssum = np.sum(structdis[structdis > 0]) + np.sum(-1 * structdis[structdis < 0])
    # calculate left sigma variance and right sigma variance
    lsigma_best = np.sqrt((negsqsum/negcount))
    rsigma_best = np.sqrt((possqsum/poscount))
    gammahat = lsigma_best/rsigma_best
    # total number of pixels - totalcount
    totalcount = structdis.shape[1] * structdis.shape[0]
    rhat = m.pow(abssum/totalcount, 2)/((negsqsum + possqsum)/totalcount)
    rhatnorm = rhat * (m.pow(gammahat, 3) + 1) * (gammahat + 1)/(m.pow(m.pow(gammahat, 2) + 1, 2))
    prevgamma = 0
    prevdiff = 1e10
    sampling = 0.001
    gam = 0.2
    # vectorized function call for best fitting parameters
    # note: np.float was removed in NumPy 1.24, so the builtin float is used for otypes
    vectfunc = np.vectorize(func, otypes=[float], cache=False)
    # calculate best fit params
    gamma_best = vectfunc(gam, prevgamma, prevdiff, sampling, rhatnorm)
    return [lsigma_best, rsigma_best, gamma_best]

def func(gam, prevgamma, prevdiff, sampling, rhatnorm):
    while(gam < 10):
        r_gam = tgamma(2/gam) * tgamma(2/gam) / (tgamma(1/gam) * tgamma(3/gam))
        diff = abs(r_gam - rhatnorm)
        if(diff > prevdiff):
            break
        prevdiff = diff
        prevgamma = gam
        gam += sampling
    gamma_best = prevgamma
    return gamma_best

def compute_features(img):
    scalenum = 2
    feat = []
    # make a copy of the image
    im_original = img.copy()
    # scale the images twice
    for itr_scale in range(scalenum):
        im = im_original.copy()
        # normalize the image
        im = im / 255.0
        # calculating MSCN coefficients
        mu = cv2.GaussianBlur(im, (7, 7), 1.166)
        mu_sq = mu * mu
        sigma = cv2.GaussianBlur(im*im, (7, 7), 1.166)
        sigma = (sigma - mu_sq)**0.5
        # structdis is the MSCN image
        structdis = im - mu
        structdis /= (sigma + 1.0/255)
        # calculate best fitted parameters from MSCN image
        best_fit_params = AGGDfit(structdis)
        # unwrap the best fit parameters
        lsigma_best = best_fit_params[0]
        rsigma_best = best_fit_params[1]
        gamma_best = best_fit_params[2]
        # append the best fit parameters for MSCN image
        feat.append(gamma_best)
        feat.append((lsigma_best*lsigma_best + rsigma_best*rsigma_best)/2)
        # shifting indices for creating pair-wise products
        shifts = [[0,1], [1,0], [1,1], [-1,1]]  # H V D1 D2
        for itr_shift in range(1, len(shifts) + 1):
            OrigArr = structdis
            reqshift = shifts[itr_shift-1]  # shifting index
            # create transformation matrix for warpAffine function
            M = np.float32([[1, 0, reqshift[1]], [0, 1, reqshift[0]]])
            ShiftArr = cv2.warpAffine(OrigArr, M, (structdis.shape[1], structdis.shape[0]))
            Shifted_new_structdis = ShiftArr
            Shifted_new_structdis = Shifted_new_structdis * structdis  # shifted_new_structdis is the pairwise product
            # best fit the pairwise product
            best_fit_params = AGGDfit(Shifted_new_structdis)
            lsigma_best = best_fit_params[0]
            rsigma_best = best_fit_params[1]
            gamma_best = best_fit_params[2]
            constant = m.pow(tgamma(1/gamma_best), 0.5)/m.pow(tgamma(3/gamma_best), 0.5)
            meanparam = (rsigma_best - lsigma_best) * (tgamma(2/gamma_best)/tgamma(1/gamma_best)) * constant
            # append the best fit calculated parameters
            feat.append(gamma_best)             # gamma best
            feat.append(meanparam)              # mean shape
            feat.append(m.pow(lsigma_best, 2))  # left variance square
            feat.append(m.pow(rsigma_best, 2))  # right variance square
        # resize the image for the next scale iteration
        im_original = cv2.resize(im_original, (0,0), fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)
    return feat

def img_MeasureImageQuality(dataset_directory):
    """
    ####################################################################################
    #Input - dataset_directory containing the images to score
    #Output - dict mapping each image file to its quality index
    #Definition - function to calculate the BRISQUE quality score in the range 0 to 100 [0:good; 100:bad]
    ####################################################################################
    """
    imgfile_dict = {}
    for file in os.listdir(dataset_directory):
        if (file.endswith(".jfif") or file.endswith(".png") or file.endswith(".jpg") or file.endswith(".jpeg")):
            filename = os.path.join(dataset_directory, file)
            if os.path.isfile(filename) == False:
                sys.exit()
            file_extension = os.path.splitext(filename)[1]
            if file_extension == ".jfif":
                extension = ".jfif"
            if file_extension == ".png":
                extension = ".png"
            if file_extension == ".jpg":
                extension = ".jpg"
            if file_extension == ".jpeg":
                extension = ".jpeg"
            if (extension not in [".jpg", ".jpeg", ".jfif", ".png"]):
                sys.exit()
            try:
                # read image from given path
                dis = cv2.imread(filename, 1)
                if (dis is None):
                    sys.exit(0)
                # convert to gray scale
                dis = cv2.cvtColor(dis, cv2.COLOR_BGR2GRAY)
                # compute feature vectors of the image
                features = compute_features(dis)
                # rescale the brisqueFeatures vector from -1 to 1
                x = [0]
                # pre loaded lists from C++ Module to rescale brisque features vector to [-1, 1]
                min_ = [0.336999, 0.019667, 0.230000, -0.125959, 0.000167, 0.000616, 0.231000, -0.125873,
                        0.000165, 0.000600, 0.241000, -0.128814, 0.000179, 0.000386, 0.243000, -0.133080,
                        0.000182, 0.000421, 0.436998, 0.016929, 0.247000, -0.200231, 0.000104, 0.000834,
                        0.257000, -0.200017, 0.000112, 0.000876, 0.257000, -0.155072, 0.000112, 0.000356,
                        0.258000, -0.154374, 0.000117, 0.000351]
                max_ = [9.999411, 0.807472, 1.644021, 0.202917, 0.712384, 0.468672, 1.644021, 0.169548,
                        0.713132, 0.467896, 1.553016, 0.101368, 0.687324, 0.533087, 1.554016, 0.101000,
                        0.689177, 0.533133, 3.639918, 0.800955, 1.096995, 0.175286, 0.755547, 0.399270,
                        1.095995, 0.155928, 0.751488, 0.402398, 1.041992, 0.093209, 0.623516, 0.532925,
                        1.042992, 0.093714, 0.621958, 0.534484]
                # append the rescaled vector to x
                for i in range(0, 36):
                    min = min_[i]
                    max = max_[i]
                    x.append(-1 + (2.0/(max - min) * (features[i] - min)))
                modelPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'allmodel.txt')
                # load model
                model = svmutil.svm_load_model(modelPath)
                # create svm node array from python list
                x, idx = gen_svm_nodearray(x[1:], isKernel=(model.param.kernel_type == PRECOMPUTED))
                x[36].index = -1  # set last index to -1 to indicate the end.
                # get important parameters from model
                svm_type = model.get_svm_type()
                is_prob_model = model.is_probability_model()
                nr_class = model.get_nr_class()
                if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVC):
                    # here svm_type is EPSILON_SVR as it's a regression problem
                    nr_classifier = 1
                dec_values = (c_double * nr_classifier)()
                # calculate the quality score of the image using the model and svm_node_array
                qualityscore = svmutil.libsvm.svm_predict_probability(model, x, dec_values)
                imgfile_dict[file] = round(qualityscore, 2)
                #print("Quality Score of the given image is: ", qualityscore, "[0:Good;100:Bad]")
            except:
                pass
            finally:
                warnings.simplefilter("ignore")
    #print(imgfile_dict)
    return imgfile_dict

# calculate mode
def mode(arr):
    if arr == []:
        return None
    else:
        return max(set(arr), key=arr.count)

def img_EDA(dataset_directory):
    """
    ####################################################################################
    #Input - dataset_directory with all types of Images
    #Output - mean, median and mode image size, channel types, extensions, recommended image sizes etc.
    #Definition - img_EDA takes all the images and returns the EDA results
    ####################################################################################
    """
    imgeda_dict = {}
    # check input directory
    if os.path.isdir(dataset_directory) == False:
        print("folder does not exist")
        sys.exit()
    width_list = []
    height_list = []
    k = []
    c = []
    cnum = []
    v = []
    ext = []
    cnt = 0
    for item in os.listdir(dataset_directory):
        if (item.endswith(".jfif") or item.endswith(".png") or item.endswith(".jpg") or item.endswith(".jpeg")):
            if os.path.isfile(os.path.join(dataset_directory, item)):
                im = Image.open(os.path.join(dataset_directory, item))
                c.append(im.mode)
                cnum.append(len(im.mode))
                width_list.append(im.width)
                height_list.append(im.height)
                k.append(im.size)
                v.append(im.width*im.height)
                f, e = os.path.splitext(os.path.join(dataset_directory, item))
                ext.append(e)
                cnt = cnt+1
    # calculate biggest and smallest image
    img_dict = {}
    for key, val in zip(k, v):
        img_dict[key] = val
    max_key = max(img_dict, key=img_dict.get)
    min_key = min(img_dict, key=img_dict.get)
    imgeda_dict['Channels'] = set(c)
    imgeda_dict['Extensions'] = set(ext)
    imgeda_dict['Total_Images'] = cnt
    imgeda_dict['Smallest_Image'] = min_key
    imgeda_dict['Largest_Image'] = max_key
    imgeda_dict['Mean_Width'] = int(np.mean(width_list))
    imgeda_dict['Mean_Height'] = int(np.mean(height_list))
    imgeda_dict['Median_Width'] = int(np.median(width_list))
    imgeda_dict['Median_Height'] = int(np.median(height_list))
    imgeda_dict['Mode_Width'] = int(mode(width_list))
    imgeda_dict['Mode_Height'] = int(mode(height_list))
    imgeda_dict['Recomended_Mean_Width_Height'] = (int(np.mean(width_list)), int(np.mean(height_list)))
    imgeda_dict['Recomended_Median_Width_Height'] = (int(np.median(width_list)), int(np.median(height_list)))
    imgeda_dict['Recomended_Mode_Width_Height'] = (int(mode(width_list)), int(mode(height_list)))
    imgeda_dict['Size_Distribution'] = dict(Counter(k).items())
    imgeda_dict['Channel_Mean'] = np.mean(cnum)
    imgeda_dict['Channel_Standard_Deviation'] = np.std(cnum)
    '''
    print('*-----------------------<<< RESULTS >>>-------------------------*')
    print()
    print('%-30s | ' % 'Channels', set(c))
    print('%-30s | ' % 'Extensions', set(ext))
    print('*---------------------------------------------------------------*')
    print('%-30s | ' % 'Total Images', cnt)
    print('%-30s | ' % 'Smallest Image', min_key)
    print('%-30s | ' % 'Largest Image', max_key)
    print('*---------------------------------------------------------------*')
    print('%-30s | ' % 'Mean Width', int(np.mean(width_list)))
    print('%-30s | ' % 'Mean Height', int(np.mean(height_list)))
    print('*---------------------------------------------------------------*')
    print('%-30s | ' % 'Median Width', int(np.median(width_list)))
    print('%-30s | ' % 'Median Height', int(np.median(height_list)))
    print('*---------------------------------------------------------------*')
    print('%-30s | ' % 'Mode Width', int(mode(width_list)))
    print('%-30s | ' % 'Mode Height', int(mode(height_list)))
    print('*---------------------------------------------------------------*')
    print('%-30s | ' % 'recommended size by mean(w,h)', (int(np.mean(width_list)), int(np.mean(height_list))))
    print('*---------------------------------------------------------------*')
    print('%-30s | ' % 'recommended size by median(w,h)', (int(np.median(width_list)), int(np.median(height_list))))
    print('*---------------------------------------------------------------*')
    print('%-30s | ' % 'recommended size by mode(w,h)', (int(mode(width_list)), int(mode(height_list))))
    print('*---------------------------------------------------------------*')
    print('%-30s | ' % 'distribution of sizes', dict(Counter(k).items()))
    print('*---------------------------------------------------------------*')
    print('%-30s | ' % 'channel mean', np.mean(cnum))
    print('%-30s | ' % 'channel standard deviation', np.std(cnum))
    '''
    #print(imgeda_dict)
    return imgeda_dict

def dhash(image, hashSize=8):
    # convert the image to grayscale and resize the grayscale image,
    # adding a single column (width) so we can compute the horizontal
    # gradient
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    resized = cv2.resize(gray, (hashSize + 1, hashSize))
    # compute the (relative) horizontal gradient between adjacent
    # column pixels
    diff = resized[:, 1:] > resized[:, :-1]
    # convert the difference image to a hash and return it
    return sum([2 ** i for (i, v) in enumerate(diff.flatten()) if v])

def img_duplicatefinder(dataset_directory):
    # grab the paths to all images in our input dataset directory and
    # then initialize our hashes dictionary
    print("[INFO] computing image hashes...")
    imagePaths = list(paths.list_images(dataset_directory))
    hashes = {}
    duplimg_list = []
    remove_file = 0
    # loop over our image paths
    for imagePath in imagePaths:
        # load the input image and compute the hash
        image = cv2.imread(imagePath)
        h = dhash(image)
        # grab all image paths with that hash, add the current image
        # path to it, and store the list back in the hashes dictionary
        p = hashes.get(h, [])
        p.append(imagePath)
        hashes[h] = p
    # loop over the image hashes
    for (h, hashedPaths) in hashes.items():
        # check to see if there is more than one image with the same hash
        if len(hashedPaths) > 1:
            #print(hashedPaths)
            duplimg_list.append(hashedPaths)
    return duplimg_list

def img_plot_colour_hist(dataset_directory):
    import io, base64, urllib
    red_values = []; green_values = []; blue_values = []; all_channels = []
    imagePaths = list(paths.list_images(dataset_directory))
    for imagePath in imagePaths:
        img = np.array(Image.open(imagePath))
        red_values.append(np.mean(img[:, :, 0]))
        green_values.append(np.mean(img[:, :, 1]))
        blue_values.append(np.mean(img[:, :, 2]))
        all_channels.append(np.mean(img))
    _, axes = plt.subplots(ncols=4, nrows=1, constrained_layout=True, figsize=(16, 3), sharey=True)
    for ax, column, vals, c in zip(
            axes,
            ['red', 'green', 'blue', 'all colours'],
            [red_values, green_values, blue_values, all_channels],
            'rgbk'):
        ax.hist(vals, bins=100, color=c)
        ax.set_title(f'{column} hist')
    plt.suptitle("Image Dataset Colour Distribution")
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    string = base64.b64encode(buf.read())
    uri = 'data:image/png;base64,' + urllib.parse.quote(string)
    return uri
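dhash above packs the 8x8 grid of horizontal-gradient signs into one 64-bit integer, so two images can be compared by counting differing bits instead of comparing pixels. img_duplicatefinder only groups exact hash matches; the following is a minimal sketch of the looser Hamming-distance comparison the same hash supports (the file names and the threshold of 5 are illustrative assumptions, not part of this module):

import cv2

img_a = cv2.imread('sample_a.jpg')   # hypothetical paths
img_b = cv2.imread('sample_b.jpg')
hash_a = dhash(img_a)                # 64-bit fingerprint (hashSize=8)
hash_b = dhash(img_b)
# Hamming distance = number of bit positions where the two hashes differ
distance = bin(hash_a ^ hash_b).count('1')
if distance <= 5:                    # assumed tolerance for 'near duplicate'
    print('likely near-duplicates, distance =', distance)
else:
    print('distinct images, distance =', distance)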
predict.py
'''
 *
 * =============================================================================
 * COPYRIGHT NOTICE
 * =============================================================================
 * @ Copyright HCL Technologies Ltd. 2021, 2022,2023
 * Proprietary and confidential. All information contained herein is, and
 * remains the property of HCL Technologies Limited. Copying or reproducing the
 * contents of this file, via any medium is strictly prohibited unless prior
 * written permission is obtained from HCL Technologies Limited.
 *
'''
import os
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import to_categorical
from keras.preprocessing import image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam
from keras.applications import VGG16
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import classification_report, confusion_matrix, precision_recall_curve
import seaborn as sns

def PredictCAST(test_csv, test_dataset_directory, load_model_dir, model_name, hparams_config_file):
    hyperparam_config = hparams_config_file['img_classifier']
    print("[Info] loading imagenet weights...")
    #baseModel = keras.applications.ResNet101(weights="imagenet", include_top=False, input_tensor=Input(shape=(128, 128, 3)))
    if model_name == "densenet":
        print('Loading Densenet model')
        baseModel = keras.applications.DenseNet121(weights="imagenet", include_top=False,
                        input_tensor=Input(shape=(hyperparam_config['img_width'], hyperparam_config['img_height'], hyperparam_config['img_channel'])))  #98
    elif model_name == "inception":
        print('Loading Inception model')
        baseModel = keras.applications.InceptionV3(weights="imagenet", include_top=False,
                        input_tensor=Input(shape=(hyperparam_config['img_width'], hyperparam_config['img_height'], hyperparam_config['img_channel'])))  #97
    headModel = baseModel.output
    headModel = Flatten(name="flatten")(headModel)
    headModel = Dense(1024, activation='relu')(headModel)
    headModel = Dropout(0.5)(headModel)
    headModel = Dense(2, activation='sigmoid')(headModel)
    model = Model(inputs=baseModel.input, outputs=headModel)
    print("[INFO] compiling model...")
    opt = Adam(lr=hyperparam_config['lr'])
    model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
    model.load_weights(os.path.join(load_model_dir, model_name))
    #model.load_weights(load_model_dir)
    test_image = []
    for i in range(test_csv.shape[0]):
        img = image.load_img(test_dataset_directory + '/' + str(test_csv['file_name'][i]),
                             target_size=(hyperparam_config['img_width'], hyperparam_config['img_height'], hyperparam_config['img_channel']),
                             grayscale=False)
        img = image.img_to_array(img)
        img = img/255
        test_image.append(img)
    test_images = np.array(test_image)
    test_labels = test_csv['class'].values
    test_labels = to_categorical(test_labels)
    # making predictions
    prediction = model.predict(test_images)
    prediction = np.argmax(prediction, axis=1)
    print('Classification Report : ')
    print(classification_report(test_csv['class'], prediction))
    sns.heatmap(confusion_matrix(test_csv['class'], prediction), annot=True)
    plt.show()
    print('Confusion matrix : ')
    print(confusion_matrix(test_csv['class'], prediction))
    print("[INFO] Evaluating model accuracy and loss...Take some moment...")
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    print('\nTest accuracy:', test_acc)
    print('\nTest loss:', test_loss)
    print("Prediction Completed...")
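PredictCAST reads its hyperparameters from an 'img_classifier' section and expects the CSV to carry 'file_name' and 'class' columns. A hypothetical invocation might look like the sketch below (every path and value here is an illustrative assumption):

import pandas as pd

hparams = {
    'img_classifier': {
        'img_width': 224,   # input width fed to the backbone
        'img_height': 224,  # input height
        'img_channel': 3,   # RGB images
        'lr': 1e-4,         # Adam learning rate used at compile time
    }
}
test_csv = pd.read_csv('test_labels.csv')  # needs 'file_name' and 'class' columns
PredictCAST(test_csv=test_csv,
            test_dataset_directory='data/test_images',
            load_model_dir='models',  # directory holding the weights file named after the model
            model_name='densenet',    # or 'inception'
            hparams_config_file=hparams)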
__init__.py
'''
 *
 * =============================================================================
 * COPYRIGHT NOTICE
 * =============================================================================
 * @ Copyright HCL Technologies Ltd. 2021, 2022,2023
 * Proprietary and confidential. All information contained herein is, and
 * remains the property of HCL Technologies Limited. Copying or reproducing the
 * contents of this file, via any medium is strictly prohibited unless prior
 * written permission is obtained from HCL Technologies Limited.
 *
'''
predict_single.py
'''
 *
 * =============================================================================
 * COPYRIGHT NOTICE
 * =============================================================================
 * @ Copyright HCL Technologies Ltd. 2021, 2022,2023
 * Proprietary and confidential. All information contained herein is, and
 * remains the property of HCL Technologies Limited. Copying or reproducing the
 * contents of this file, via any medium is strictly prohibited unless prior
 * written permission is obtained from HCL Technologies Limited.
 *
'''
import os
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import to_categorical
from keras.preprocessing import image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam
from keras.applications import VGG16
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import classification_report, confusion_matrix, precision_recall_curve
import seaborn as sns
import cv2

def PredictCAST(test_image, load_model_dir, model_name, hparams_config_file):
    hyperparam_config = hparams_config_file['img_classifier']
    print("[Info] loading imagenet weights...")
    #baseModel = keras.applications.ResNet101(weights="imagenet", include_top=False, input_tensor=Input(shape=(128, 128, 3)))
    if model_name == "densenet":
        print('Loading Densenet model')
        baseModel = keras.applications.DenseNet121(weights="imagenet", include_top=False,
                        input_tensor=Input(shape=(hyperparam_config['img_width'], hyperparam_config['img_height'], hyperparam_config['img_channel'])))  #98
    elif model_name == "inception":
        print('Loading Inception model')
        baseModel = keras.applications.InceptionV3(weights="imagenet", include_top=False,
                        input_tensor=Input(shape=(hyperparam_config['img_width'], hyperparam_config['img_height'], hyperparam_config['img_channel'])))  #97
    headModel = baseModel.output
    headModel = Flatten(name="flatten")(headModel)
    headModel = Dense(1024, activation='relu')(headModel)
    headModel = Dropout(0.5)(headModel)
    headModel = Dense(2, activation='sigmoid')(headModel)
    model = Model(inputs=baseModel.input, outputs=headModel)
    print("[INFO] compiling model...")
    opt = Adam(lr=hyperparam_config['lr'])
    model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
    model.load_weights(os.path.join(load_model_dir, model_name))
    img = cv2.imread(test_image)
    img = cv2.resize(img, (hyperparam_config['img_width'], hyperparam_config['img_height']))
    orig = img.copy()
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = img/255
    print("[Info] predicting output")
    #prediction = model.predict_classes(img)
    prediction = model.predict(img)
    prediction = np.argmax(prediction, axis=1)  # single argmax label: 0 or 1
    print(prediction)
    if (prediction < 0.5):
        print("def_front")
        cv2.putText(orig, "def_front", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    else:
        print("ok_front")
        cv2.putText(orig, "ok_front", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    plt.imshow(orig)
    plt.axis('off')
    plt.show()
    print("Prediction Completed...")
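The single-image variant takes an image path instead of a CSV; a hypothetical call (all paths and values below are assumptions) could be:

hparams = {'img_classifier': {'img_width': 224, 'img_height': 224,
                              'img_channel': 3, 'lr': 1e-4}}
PredictCAST(test_image='data/sample_casting.jpg',
            load_model_dir='models',
            model_name='densenet',
            hparams_config_file=hparams)
# np.argmax over the two sigmoid outputs yields 0 ('def_front') or 1 ('ok_front')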
incMachineLearning.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import warnings warnings.filterwarnings('ignore') import logging import sklearn from random import sample from numpy.random import uniform import numpy as np import math import pickle import os import json from math import isnan from sklearn.preprocessing import binarize from sklearn.preprocessing import LabelEncoder import pandas as pd from sklearn.preprocessing import LabelBinarizer from sklearn.model_selection import train_test_split from incremental.incClassificationModel import incClassifierModel from incremental.incRegressionModel import incRegressionModel class incMachineLearning(object): def __init__(self,mlobj): self.features=[] self.mlobj=mlobj self.log = logging.getLogger('eion') def startLearning(self,mlconfig,modelType,modelParams,modelList,scoreParam,features,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps): model = 'None' params = 'None' score = 0xFFFF estimator = None model_tried = '' threshold = -1 pscore = -1 rscore = -1 topics = {} if(targetColumn != ''): targetData = dataFrame[targetColumn] datacolumns=list(dataFrame.columns) if targetColumn in datacolumns: datacolumns.remove(targetColumn) scoreParam = self.mlobj.setScoreParams(scoreParam,modelType,categoryCountList) self.log.info('\n-------------- Training ML: Start --------------') model_type,model,params, score, estimator,model_tried,xtrain,ytrain,xtest,ytest,threshold,pscore,rscore,method,incObj=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps) self.log.info('-------------- Training ML: End --------------\n') filename = os.path.join(deployLocation,'production','model',model+'.pkl') saved_model = model+'.pkl' pickle.dump(estimator, open(filename, 'wb')) df_test = xtest.copy() df_test.reset_index(inplace = True,drop=True) trainPredictedData = incObj.bestTrainPredictedData predictedData = incObj.bestPredictedData try: if(model_type == 'Classification'): self.log.info('\n--------- Performance Matrix with Train Data ---------') train_matrix = self.mlobj.getClassificationPerformaceMatrix(ytrain,trainPredictedData,labelMaps) self.log.info('--------- Performance Matrix with Train Data End ---------\n') self.log.info('\n--------- Performance Matrix with Test Data ---------') performancematrix = self.mlobj.getClassificationPerformaceMatrix(ytest,predictedData,labelMaps) ytest.reset_index(inplace=True,drop=True) df_test['actual'] = ytest df_test['predict'] = predictedData self.log.info('--------- Performance Matrix with Test Data End ---------\n') matrix = performancematrix elif(model_type == 'Regression'): self.log.info('\n--------- Performance Matrix with Train Data ---------') train_matrix = self.mlobj.get_regression_matrix(ytrain, trainPredictedData) self.log.info('--------- Performance Matrix with Train Data End 
---------\n') self.log.info('\n--------- Performance Matrix with Test Data ---------') matrix = self.mlobj.get_regression_matrix(ytest, predictedData) ytest.reset_index(inplace=True, drop=True) df_test['actual'] = ytest df_test['predict'] = predictedData self.log.info('--------- Performance Matrix with Test Data End ---------\n') except Exception as Inst: self.log.info('--------- Error Performance Matrix ---------\n') self.log.info(str(Inst)) df_test['predict'] = predictedData matrix = "" train_matrix = "" self.log.info('--------- Performance Matrix with Test Data End ---------\n') df_test.to_csv(predicted_data_file) return 'Success',model_type,model,saved_model,matrix,train_matrix,xtrain.shape,model_tried,score,filename,self.features,threshold,pscore,rscore,method,estimator,xtrain,ytrain,xtest,ytest,topics,params def startLearnerModule(self,mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps): matrix = '' threshold = -1 pscore = -1 rscore = -1 datacolumns=list(xtrain.columns) if targetColumn in datacolumns: datacolumns.remove(targetColumn) self.features =datacolumns self.log.info('-------> Features Used For Training the Model: '+(str(self.features))[:500]) xtrain = xtrain[self.features] xtest = xtest[self.features] method = mlconfig['optimizationMethod'] method = method.lower() geneticParam = '' optimizationHyperParameter = mlconfig['optimizationHyperParameter'] cvSplit = optimizationHyperParameter['trainTestCVSplit'] nIter = int(optimizationHyperParameter['iterations']) if(method.lower() == 'genetic'): geneticParam = optimizationHyperParameter['geneticparams'] scoreParam = scoreParam if 'thresholdTunning' in mlconfig: thresholdTunning = mlconfig['thresholdTunning'] else: thresholdTunning = 'NA' if cvSplit == "": cvSplit =None else: cvSplit =int(cvSplit) if modelType == 'classification': model_type = "Classification" MakeFP0 = False MakeFN0 = False if(len(categoryCountList) == 2): if(thresholdTunning.lower() == 'fp0'): MakeFP0 = True elif(thresholdTunning.lower() == 'fn0'): MakeFN0 = True noOfClasses= len(labelMaps) incObjClf = incClassifierModel(noOfClasses,modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,modelType,MakeFP0,MakeFN0,deployLocation) model, params, score, estimator,model_tried,threshold,pscore,rscore = incObjClf.firstFit() incObj = incObjClf elif modelType == 'regression': model_type = "Regression" incObjReg = incRegressionModel(modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,deployLocation) model,params,score,estimator,model_tried = incObjReg.firstFit() incObj = incObjReg return model_type,model,params, score, estimator,model_tried,xtrain,ytrain,xtest,ytest,threshold,pscore,rscore,method, incObj
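startLearnerModule consults only a handful of mlconfig keys. A minimal configuration that satisfies it might look like this sketch (values are illustrative assumptions, not shipped defaults):

mlconfig = {
    'optimizationMethod': 'random',      # lower-cased before use
    'optimizationHyperParameter': {
        'trainTestCVSplit': '5',         # '' disables the CV split
        'iterations': '10',              # cast to int as nIter
        'geneticparams': {},             # read only when the method is 'genetic'
    },
    'thresholdTunning': 'fp0',           # 'fp0' / 'fn0' / 'NA'; applies to binary targets
}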
incRegressionModel.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' from learner.optimizetechnique import OptimizationTq from learner.parameters import parametersDefine import time import logging import os import sys from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error,make_scorer from sklearn.metrics import mean_squared_error from learner.aion_matrix import aion_matrix class incRegressionModel(): def __init__(self,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,deployLocation): self.modelList =modelList self.params =params self.trainX =trainX self.trainY =trainY self.testX = testX self.testY = testY self.method =method self.scoreParam=scoreParam self.cvSplit=cvSplit self.numIter=numIter self.geneticParam=geneticParam self.log = logging.getLogger('eion') self.deployLocation = deployLocation self.bestTrainPredictedData = None self.bestPredictedData = None self.AlgorithmNames={'Online Linear Regression':'Online Linear Regression', 'Online Decision Tree Regressor':'Online Decision Tree Regressor', 'Online KNN Regressor':'Online KNN Regressor'} self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()} def firstFit(self): bestModel='' bestParams={} import sys bestScore=-sys.float_info.max #bugfix 11656 scoredetails = '' self.log.info('\n---------- Regression Model has started ----------') try: for modelName in self.modelList: if modelName not in self.params: continue paramSpace=self.params[modelName] algoName = self.AlgorithmNames[modelName] from incremental.riverML import riverML riverMLObj = riverML() self.log.info("-------> Model Name: "+str(modelName)) start = time.time() model, modelParams, estimator, trainPredictedData = riverMLObj.startLearn('regression',algoName,paramSpace,self.trainX, self.trainY) modelParams = str(modelParams) executionTime=time.time() - start self.log.info('---------> Total Execution: '+str(executionTime)) predictedData = riverMLObj.getPrediction(estimator,self.testX) if 'neg_mean_squared_error' in self.scoreParam: meanssquatederror = mean_squared_error(self.testY,predictedData) score = meanssquatederror elif 'neg_root_mean_squared_error' in self.scoreParam: rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False) score = rootmeanssquatederror elif 'neg_mean_absolute_error' in self.scoreParam: meanabsoluteerror=mean_absolute_error(self.testY,predictedData) score = meanabsoluteerror elif 'r2' in self.scoreParam: r2score=r2_score(self.testY,predictedData) score = round(r2score*100, 2) if self.scoreParam == "r2": if score>bestScore: bestScore =score bestModel =model bestParams=modelParams bestEstimator=estimator self.bestTrainPredictedData = trainPredictedData self.bestPredictedData = predictedData else: if abs(score) < bestScore or bestScore == -sys.float_info.max: bestScore =abs(score) bestModel =model bestParams=modelParams bestEstimator=estimator self.bestTrainPredictedData = trainPredictedData self.bestPredictedData = predictedData metrices = {} metrices["score"] = score if(scoredetails 
!= ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","Score":'+str(abs(score))+'}' self.log.info('Status:- |... ML Algorithm applied: '+modelName) self.log.info("Status:- |... Testing Score: "+str(score)) self.log.info('---------- Regression Model End ---------- \n') self.log.info('\n------- Best Model and its parameters -------------') self.log.info('Status:- |... Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2))) self.log.info("-------> Best Name: "+str(bestModel)) self.log.info("-------> Best Score: "+str(bestScore)) return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails except Exception as inst: self.log.info( '\n-----> regressionModel failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
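The selection loop maximises r2 but minimises the error metrics, which is why it compares abs(score) against a bestScore initialised to -sys.float_info.max. A small worked illustration of the error-metric branch (the candidate scores are invented):

import sys

bestScore = -sys.float_info.max      # sentinel: no model evaluated yet
for score in [14.2, 9.7, 11.3]:      # e.g. mean squared errors of three models
    if abs(score) < bestScore or bestScore == -sys.float_info.max:
        bestScore = abs(score)       # keep the smallest error seen so far
print(bestScore)                     # -> 9.7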
__init__.py
'''
 *
 * =============================================================================
 * COPYRIGHT NOTICE
 * =============================================================================
 * @ Copyright HCL Technologies Ltd. 2021, 2022,2023
 * Proprietary and confidential. All information contained herein is, and
 * remains the property of HCL Technologies Limited. Copying or reproducing the
 * contents of this file, via any medium is strictly prohibited unless prior
 * written permission is obtained from HCL Technologies Limited.
 *
'''
incClassificationModel.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import time import os import sys import numpy as np from sklearn.metrics import confusion_matrix from sklearn.metrics import recall_score from sklearn.metrics import precision_score from sklearn.preprocessing import binarize from learner.optimizetechnique import OptimizationTq from learner.parameters import parametersDefine import logging from learner.aion_matrix import aion_matrix # apply threshold to positive probabilities to create labels def to_labels(pos_probs, threshold): return (pos_probs >= threshold).astype('int') class incClassifierModel(): def __init__(self,noOfClasses,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,modelType,MakeFP0,MakeFN0,deployLocation): self.noOfClasses = noOfClasses self.modelList =modelList self.params =params self.trainX =trainX self.X =trainX self.trainY =trainY self.testX = testX self.testY = testY self.method =method self.scoreParam=scoreParam self.cvSplit=cvSplit self.numIter=numIter self.geneticParam=geneticParam self.MakeFP0= MakeFP0 self.MakeFN0=MakeFN0 self.log = logging.getLogger('eion') self.modelType = modelType self.deployLocation = deployLocation self.isRiverModel = False self.AlgorithmNames={'Online Logistic Regression':'Online Logistic Regression', 'Online Softmax Regression':'Online Softmax Regression', 'Online Decision Tree Classifier':'Online Decision Tree Classifier', 'Online KNN Classifier':'Online KNN Classifier'} self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()} def check_threshold(self,estimator,testX,testY,threshold_range,checkParameter,modelName): thresholdx = -1 for threshold in threshold_range: predictedData = estimator.predict_proba(testX) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold)#bug 12437 p_score = precision_score(testY, predictedData) r_score = recall_score(testY, predictedData) tn, fp, fn, tp = confusion_matrix(testY, predictedData).ravel() if(checkParameter.lower() == 'fp'): if fp == 0: if(p_score == 1): thresholdx = threshold self.log.info('---------------> Best Threshold:'+str(threshold)) self.log.info('---------------> Best Precision:'+str(p_score)) self.log.info('---------------> Best Recall:'+str(r_score)) self.log.info('---------------> TN:'+str(tn)) self.log.info('---------------> FP:'+str(fp)) self.log.info('---------------> FN:'+str(fn)) self.log.info('---------------> TP:'+str(tp)) break if(checkParameter.lower() == 'fn'): if fn == 0: if(r_score == 1): thresholdx = threshold self.log.info('---------------> Best Threshold:'+str(threshold)) self.log.info('---------------> Best Precision:'+str(p_score)) self.log.info('---------------> Best Recall:'+str(r_score)) self.log.info('---------------> TN:'+str(tn)) self.log.info('---------------> FP:'+str(fp)) self.log.info('---------------> FN:'+str(fn)) self.log.info('---------------> TP:'+str(tp)) break return(thresholdx,p_score,r_score) def 
getBestModel(self,fp0,fn0,threshold,bestthreshold,rscore,brscore,pscore,bpscore,tscore,btscore): cmodel = False if(threshold != -1): if(bestthreshold == -1): cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif fp0: if rscore > brscore: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif rscore == brscore: if tscore > btscore or btscore == -0xFFFF: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif fn0: if pscore > bpscore: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore elif pscore == bpscore: if tscore > btscore or btscore == -0xFFFF: cmodel = True bestthreshold = threshold brscore = rscore bpscore = pscore btscore = tscore else: if tscore > btscore or btscore == -0xFFFF: cmodel = True btscore = tscore else: if(bestthreshold == -1): if tscore > btscore or btscore == -0xFFFF: cmodel = True btscore = tscore return cmodel,btscore,bestthreshold,brscore,bpscore def firstFit(self): bestModel='None' bestParams={} bestScore=-0xFFFF bestEstimator = 'None' scoredetails = '' threshold = -1 bestthreshold = -1 precisionscore =-1 bestprecisionscore=-1 recallscore = -1 bestrecallscore=-1 self.bestTrainPredictedData = None self.bestPredictedData = None self.log.info('\n---------- ClassifierModel has started ----------') objClf = aion_matrix() try: for modelName in self.modelList: paramSpace=self.params[modelName] algoName = self.AlgorithmNames[modelName] from incremental.riverML import riverML riverMLObj = riverML() self.log.info("-------> Model Name: "+str(modelName)) start = time.time() model, modelParams, estimator, trainPredictedData = riverMLObj.startLearn('classification',algoName,paramSpace,self.trainX, self.trainY, self.noOfClasses) modelParams = str(modelParams) predictedData = riverMLObj.getPrediction(estimator,self.testX) executionTime=time.time() - start self.testY.reset_index(inplace=True, drop=True) score = objClf.get_score(self.scoreParam,self.testY.values.flatten(),predictedData.values.flatten()) self.log.info(str(score)) metrices = {} metrices["score"] = score threshold = -1 precisionscore = precision_score(self.testY, predictedData, average='macro') recallscore = recall_score(self.testY, predictedData, average='macro') self.log.info('---------> Total Execution: '+str(executionTime)) if(scoredetails != ''): scoredetails += ',' scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","Score":'+str(score)+'}' status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore) if status: bestScore =bscore bestModel =model bestParams=modelParams bestEstimator=estimator bestthreshold = threshold bestrecallscore = recallscore bestprecisionscore = precisionscore self.bestTrainPredictedData = trainPredictedData self.bestPredictedData = predictedData self.log.info('Status:- |... ML Algorithm applied: '+modelName) self.log.info("Status:- |... Testing Score: "+str(score)) self.log.info('---------- ClassifierModel End ---------- \n') self.log.info('\n------- Best Model and its parameters -------------') self.log.info('Status:- |... 
Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2))) self.log.info("-------> Best Name: "+str(bestModel)) self.log.info("-------> Best Score: "+str(bestScore)) return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails,bestthreshold,bestprecisionscore,bestrecallscore except Exception as inst: self.log.info( '\n-----> ClassifierModel failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
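check_threshold walks a range of probability thresholds and keeps the first one that drives the chosen error type to zero (FP with precision 1 for 'fp0', FN with recall 1 for 'fn0'). A stripped-down sketch of the FP-free sweep on invented data:

import numpy as np
from sklearn.preprocessing import binarize
from sklearn.metrics import confusion_matrix, precision_score

y_true = np.array([0, 0, 1, 1, 1])
proba = np.array([0.30, 0.55, 0.60, 0.80, 0.95])  # invented P(class == 1)
for threshold in np.arange(0.1, 1.0, 0.1):
    pred = binarize(proba.reshape(-1, 1), threshold=threshold).ravel()
    tn, fp, fn, tp = confusion_matrix(y_true, pred).ravel()
    if fp == 0 and precision_score(y_true, pred) == 1:
        print('first threshold with no false positives:', round(float(threshold), 1))
        break  # -> 0.6 for this data; false negatives are tolerated in FP0 mode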
incProfiler.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' #System imports import logging import os import sys import pickle #Sci-Tools imports import numpy as np import pandas as pd from sklearn.preprocessing import LabelEncoder from scipy import stats from word2number import w2n #river imports from river.preprocessing import StatImputer from river import stats, compose, anomaly class incProfiler(): def __init__(self): self.DtypesDic={} self.pandasNumericDtypes=['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] self.allNumberTypeCols = [] #all number type columns self.allNumCols = [] #only numerical columns which includes num features and target if it is numerical self.allCatCols = [] self.numFtrs = [] self.catFtrs = [] self.textFtrs = [] self.textVectorFtrs = [] self.numDiscreteCols = [] self.numContinuousCols = [] self.wordToNumericFeatures=[] self.emptyCols=[] self.missingCols = [] self.targetColumn = "" self.le_dict = {} self.configDict = {} self.incFill = None self.incLabelMapping = None self.incCatEncoder = None self.incScaler = None self.incOutlierRem = None self.log = logging.getLogger('eion') def pickleDump(self, model, path): if model is not None: with open(path, 'wb') as f: pickle.dump(model, f) def saveProfilerModels(self, deployLocation): if isinstance(self.incFill['num_fill'], StatImputer) or isinstance(self.incFill['cat_fill'], StatImputer): self.pickleDump(self.incFill, os.path.join(deployLocation,'production','profiler','incFill.pkl')) self.pickleDump(self.incLabelMapping, os.path.join(deployLocation,'production','profiler','incLabelMapping.pkl')) self.pickleDump(self.incCatEncoder, os.path.join(deployLocation,'production','profiler','incCatEncoder.pkl')) self.pickleDump(self.incScaler, os.path.join(deployLocation,'production','profiler','incScaler.pkl')) self.pickleDump(self.incOutlierRem, os.path.join(deployLocation,'production','profiler','incOutlierRem.pkl')) def featureAnalysis(self, df, conf_json, targetFeature): try: self.log.info('-------> Remove Duplicate Rows') noofdplicaterows = df.duplicated(keep='first').sum() df = df.drop_duplicates(keep="first") df = df.reset_index(drop=True) self.log.info('Status:- |... 
Duplicate row treatment done: '+str(noofdplicaterows)) self.log.info(df.head(5)) self.log.info( '\n----------- Inspecting Features -----------') ctn_count = 0 df = df.replace('-', np.nan) df = df.replace('?', np.nan) dataFDtypes=self.dataFramecolType(df) numerical_ratio = float(conf_json['numericFeatureRatio']) categoricalMaxLabel = int(conf_json['categoryMaxLabel']) indexFeatures = [] numOfRows = df.shape[0] dataCols = df.columns for item in dataFDtypes: if(item[1] == 'object'): filteredDf,checkFlag = self.smartFilter(item[0],df,numerical_ratio) if(checkFlag): self.wordToNumericFeatures.append(item[0]) self.log.info('----------> Data Type Converting to numeric :Yes') try: df[item[0]]=filteredDf[item[0]].astype(float) except: pass ctn_count = ctn_count+1 else: count = (df[item[0]] - df[item[0]].shift() == 1).sum() if((numOfRows - count) == 1): self.log.info( '-------> Feature :'+str(item[0])) self.log.info('----------> Sequence Feature') indexFeatures.append(item[0]) self.configDict['wordToNumCols'] = self.wordToNumericFeatures self.configDict['emptyFtrs'] = indexFeatures self.log.info('Status:- |... Feature inspection done for numeric data: '+str(ctn_count)+' feature(s) converted to numeric') self.log.info('Status:- |... Feature word to numeric treatment done: '+str(self.wordToNumericFeatures)) self.log.info( '----------- Inspecting Features End -----------\n') except Exception as inst: self.log.info("Error in Feature inspection: "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) try: self.log.info('\n---------- Dropping Index features ----------') self.log.info('Index Features to remove '+str(indexFeatures)) if len(indexFeatures) > 0: dataCols = list(set(dataCols) - set(indexFeatures)) for empCol in indexFeatures: self.log.info('-------> Drop Feature: '+empCol) df = df.drop(columns=[empCol]) self.log.info('---------- Dropping Index features End----------\n') dataFDtypes=self.dataFramecolType(df) categoricalMaxLabel = int(conf_json['categoryMaxLabel']) for item in dataFDtypes: self.DtypesDic[item[0]] = item[1] nUnique=len(df[item[0]].unique().tolist()) if item[1] in self.pandasNumericDtypes: self.allNumberTypeCols.append(item[0]) if nUnique >= categoricalMaxLabel: self.allNumCols.append(item[0]) #pure numerical if item[1] in ['int16', 'int32', 'int64']: self.numDiscreteCols.append(item[0]) elif item[1] in ['float16', 'float32', 'float64']: self.numContinuousCols.append(item[0]) else: self.allCatCols.append(item[0]) elif item[1] != 'bool': if (nUnique >= categoricalMaxLabel) and targetFeature != item[0]: self.textFtrs.append(item[0]) else: col = item[0] if (max(df[col].astype(str).str.split().str.len()) > 10) and targetFeature != item[0]: self.textFtrs.append(item[0]) else: self.allCatCols.append(item[0]) else: self.allCatCols.append(item[0]) misval_ratio = float(conf_json['misValueRatio']) self.configDict['misval_ratio'] = misval_ratio missingCols, emptyCols = self.getMissingVals(df, dataCols, misval_ratio) if targetFeature in emptyCols: raise Exception('Target column '+str(targetFeature)+' cannot be empty') dataCols = list(set(dataCols) - set(emptyCols)) self.log.info('\n---------- Dropping empty features ----------') for empCol in emptyCols: self.log.info('-------> Drop Feature: '+empCol) df = df.drop(columns=[empCol]) self.log.info('---------- Dropping empty features End----------\n') self.log.info('Status:- |... 
Empty feature treatment done: '+str(len(emptyCols))+' empty feature(s) found') self.log.info('-------> Data Frame Shape After Dropping (Rows,Columns): '+str(df.shape)) self.allNumCols = list(set(self.allNumCols) - set(emptyCols)) self.allCatCols = list(set(self.allCatCols) - set(emptyCols)) self.textFtrs = list(set(self.textFtrs) - set(emptyCols)) missingValFtrs = list(set(missingCols) - set(emptyCols)) self.log.info(str(len(missingValFtrs))+' feature(s) found with missing value(s)') self.log.info('\n-------> Numerical continuous columns :'+(str(self.numContinuousCols))[:500]) self.log.info('-------> Numerical discrete columns :'+(str(self.numDiscreteCols))[:500]) self.log.info('-------> Non numerical columns :'+(str(self.allCatCols))[:500]) self.log.info('-------> Text columns :'+(str(self.textFtrs))[:500]) except Exception as inst: self.log.info("Error in segregating numerical and categorical columns: "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) return df, missingValFtrs, emptyCols, dataCols, self.allNumCols, self.allCatCols, self.textFtrs def createIncProfiler(self, df, conf_json, allNumCols, numFtrs, allCatCols, textFtrs, missingValFtrs): self.incLabelMapping = None catFtrs = allCatCols.copy() #LabelEncoding if self.targetColumn in allCatCols: catFtrs.remove(self.targetColumn) self.incLabelMapping = LabelEncoder() df[self.targetColumn] = df[self.targetColumn].apply(str) self.incLabelMapping.fit(df[self.targetColumn]) self.le_dict = dict(zip(self.incLabelMapping.classes_, self.incLabelMapping.transform(self.incLabelMapping.classes_))) self.log.info('----------> Encoded Values of Target Labels: '+(str(self.le_dict))[:500]) #self.incFill --> {num_fill:SI/0.0/'drop', cat_fill:SI/0.0/'drop'} #fill self.incFill = {} self.incCatEncoder = None self.incScaler = None self.incOutlierRem = None num_fill_method = 'Mean' for x in list(conf_json['numericalFillMethod'].keys()): if conf_json['numericalFillMethod'][x] == 'True': num_fill_method = x break if num_fill_method.lower() =='mean': num_fill = [(col, stats.Mean()) for col in allNumCols] self.incFill['num_fill'] = StatImputer(*num_fill) elif num_fill_method.lower() =='min': num_fill = [(col, stats.Min()) for col in allNumCols] self.incFill['num_fill'] = StatImputer(*num_fill) elif num_fill_method.lower() == 'max': num_fill = [(col, stats.Max()) for col in allNumCols] self.incFill['num_fill'] = StatImputer(*num_fill) elif num_fill_method.lower() =='zero': self.incFill['num_fill'] = 'zero' elif num_fill_method.lower() =='drop': self.incFill['num_fill'] = 'drop' else: num_fill = [(col, stats.Mean()) for col in allNumCols] self.incFill['num_fill'] = StatImputer(*num_fill) cat_fill_method = 'Mode' for x in list(conf_json['categoricalFillMethod'].keys()): if conf_json['categoricalFillMethod'][x] == 'True': cat_fill_method = x break if cat_fill_method.lower() =='zero': self.incFill['cat_fill'] = 'zero' elif cat_fill_method.lower() == 'mode': cat_fill = [(col, stats.Mode()) for col in allCatCols] self.incFill['cat_fill'] = StatImputer(*cat_fill) elif cat_fill_method.lower() =='drop': self.incFill['cat_fill'] = 'drop' #CatEncoding for x in list(conf_json['categoryEncoding'].keys()): if conf_json['categoryEncoding'][x] == 'True': catEncoder = x break catEncHow = 'Mean' for x in list(conf_json['targetEncodingParams']['how'].keys()): if conf_json['targetEncodingParams']['how'][x] == 'True': catEncHow = x break if 
self.targetColumn in catFtrs: catFtrs.remove(self.targetColumn) if len(catFtrs) > 0: from river.feature_extraction import TargetAgg if catEncHow.lower() == 'mean': agg_stat = stats.Mean() if catEncHow.lower() == 'bayesianmean' or catEncHow.lower() == 'bayesian mean': agg_stat = stats.BayesianMean(prior=0.5, prior_weight=50) self.incCatEncoder = TargetAgg( by=catFtrs[0], how=agg_stat) for col in catFtrs[1:]: self.incCatEncoder += TargetAgg( by=col, how=agg_stat) self.incCatEncoder|= compose.Discard(*catFtrs) #Scaling normalization_status = 'False' normalization_method = "" if 'normalization' in conf_json: nor_supported_methods = conf_json['normalization'] for k in nor_supported_methods.keys(): if conf_json['normalization'][k].lower() == 'true': normalization_status='True' normalization_method =k break if normalization_status.lower() == "true" and len(numFtrs) > 0: from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler if self.targetColumn in numFtrs: numFtrs.remove(self.targetColumn) if normalization_method.lower() =='standardscaler': self.incScaler = StandardScaler() elif normalization_method.lower() =='minmaxscaler' or normalization_method.lower() =='minmax': self.incScaler = MinMaxScaler() elif normalization_method.lower() =='maxabsscaler' or normalization_method.lower() =='maxabs': self.incScaler = MaxAbsScaler() else: self.incScaler = None #OutlierRemoval outlier_status = 'False' outlier_method = 'None' for x in list(conf_json['outlierDetection'].keys()): if conf_json['outlierDetection'][x] == 'True': outlier_method = x outlier_status = 'True' break if outlier_status and numFtrs: outlierMethodNames = list(conf_json['outlierDetectionParams'].keys()) if outlier_method.lower() == 'oneclasssvm' or outlier_method.lower() == 'one class svm': for x in outlierMethodNames: if x[0].lower() == 'o': key = x break params = conf_json['outlierDetectionParams'][key] self.log.info('<--- one class SVM with quantile filter --->') self.incOutlierRem = anomaly.QuantileFilter(anomaly.OneClassSVM(nu=float(params['nu'])),q=float(params['q'])) elif outlier_method.lower() =='halfspacetrees' or outlier_method.lower() =='half space trees': for x in outlierMethodNames: if x[0].lower() == 'h': key = x break params = conf_json['outlierDetectionParams'][key] self.log.info('<--- Half space trees with quantile filter --->') self.incOutlierRem = anomaly.QuantileFilter(anomaly.HalfSpaceTrees(n_trees=int(params['n_trees']),height=int(params['height']), window_size=int(params['window_size'])) ,q=float(params['q'])) else: self.log.info("No method is provided for outlier analysis") def getMissingVals(self,dataframe,columns,misval_ratio): try: self.log.info( '\n----------- Detecting for Missing Values -----------') nonNAArray=[] numOfRows = dataframe.shape[0] for i in columns: numNa=dataframe.loc[(pd.isna(dataframe[i])),i ].shape[0] nonNAArray.append(tuple([i,numNa])) for item in nonNAArray: numofMissingVals = item[1] if(numofMissingVals !=0): self.log.info('-------> Feature '+str(item[0])) self.log.info('----------> Number of Empty Rows '+str(numofMissingVals)) self.missingCols.append(item[0]) if(numofMissingVals >= numOfRows * misval_ratio): self.log.info('----------> Empty: Yes') self.log.info('----------> Permitted Rows: '+str(int(numOfRows * misval_ratio))) self.emptyCols.append(item[0]) if(len(self.missingCols) !=0): self.log.info( '----------- Detecting for Missing Values End -----------\n') return self.missingCols, self.emptyCols else: self.log.info( '-------> Missing Value Features :Not Any') 
self.log.info( '----------- Detecting for Missing Values End -----------\n') return self.missingCols, self.emptyCols except Exception as e: self.log.info("getMissingVals failed ==>" +str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) return None, None def startIncProfiler(self,df,conf_json,targetFeature,deployLocation,problemType): try: self.targetColumn = targetFeature df, missingValFtrs, emptyFtrs, dataFtrs, allNumCols, allCatCols, textFtrs = self.featureAnalysis(df, conf_json, self.targetColumn) if len(textFtrs)>0: self.log.info('Text Features are not supported. Dropping '+str(textFtrs)[:500]) df = df.drop(columns=textFtrs) catFtrs = allCatCols.copy() numFtrs = allNumCols.copy() if self.targetColumn in catFtrs: catFtrs.remove(self.targetColumn) if targetFeature in allNumCols: numFtrs.remove(targetFeature) self.configDict['targetCol'] = self.targetColumn self.configDict['numFtrs'] = numFtrs self.configDict['catFtrs'] = catFtrs self.configDict['allNumCols'] = allNumCols self.configDict['allCatCols'] = allCatCols self.configDict['allFtrs'] = numFtrs+catFtrs try: self.log.info('\n---------- Creating Incremental profiler models ----------') self.createIncProfiler(df, conf_json, allNumCols, numFtrs, allCatCols, textFtrs, missingValFtrs) self.log.info('\n--------- Incremental profiler models have been created ---------') except Exception as inst: self.log.info("Error in creating Incremental profiler models"+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) raise try: #mvt # if missingValFtrs: if self.incFill['num_fill'] == 'drop': df = df.dropna(axis = 0, subset=allNumCols) self.configDict['num_fill'] = 'drop' elif self.incFill['num_fill'] == 'zero': df[allNumCols] = df[allNumCols].fillna(value = 0.0) self.configDict['num_fill'] = 'zero' else: df = df.astype(object).where(df.notna(), None) df[allNumCols]= df[allNumCols].apply(lambda row: self.apply_river_model(row.to_dict(), self.incFill ['num_fill']), axis='columns') self.configDict['num_fill'] = {col:self.incFill['num_fill'].stats[col].get() for col in allNumCols} if self.incFill['cat_fill'] == 'drop': df = df.dropna(axis = 0, subset=allCatCols) self.configDict['cat_fill'] = 'drop' elif self.incFill['cat_fill'] == 'zero': df[allCatCols] = df[allCatCols].fillna(value = 0.0) self.configDict['cat_fill'] = 'zero' else: df = df.astype(object).where(df.notna(), None) df[allCatCols]= df[allCatCols].apply(lambda row: self.apply_river_model(row.to_dict(), self.incFill['cat_fill']), axis='columns') self.configDict['cat_fill'] = {col:self.incFill['cat_fill'].stats[col].get() for col in allCatCols} self.log.info('\nStatus:- |... 
Missing value treatment done') except Exception as inst: self.log.info("Error in Missing value treatment "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) raise try: #labelenc if self.incLabelMapping: df[targetFeature] = self.incLabelMapping.transform(df[targetFeature]) # self.configDict['labelMapping'] = self.le_dict except Exception as inst: self.log.info("Error in Label mapping "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) raise try: #catenc if self.incCatEncoder: self.log.info('\n--------- Converting Non Numerical Categorical Features to Numerical Features ---------') self.encTarget = targetFeature if problemType.lower() == 'regression': from sklearn.preprocessing import StandardScaler sc = StandardScaler() self.encTarget = 'scaledTarget' df['scaledTarget'] = sc.fit_transform(df[targetFeature].to_numpy().reshape(-1,1)) encCols = catFtrs.copy() encCols.append(self.encTarget) self.configDict['encCols'] = encCols self.configDict['encTarget'] = self.encTarget transformed_data = df[encCols].apply(lambda row: self.apply_enc(row.to_dict()), axis='columns') if targetFeature in transformed_data.columns: transformed_data.drop(targetFeature, inplace=True, axis = 1) if problemType.lower() == 'regression': df.drop('scaledTarget', inplace=True, axis = 1) df[catFtrs] = transformed_data # self.log.info('Status:- |... Target Encoding state is as follows: ') self.configDict['catEnc'] = [] if len(catFtrs) == 1: col = catFtrs[0] self.configDict['catEnc'].append({col:self.incCatEncoder['TargetAgg'].state.to_dict()}) else: for i, col in enumerate(catFtrs): if i==0: no = '' else: no = str(i) self.configDict['catEnc'].append({col:self.incCatEncoder['TransformerUnion']['TargetAgg'+no].state.to_dict()}) # print(self.incCatEncoder['TransformerUnion']['TargetAgg'].state) # self.log.info(self.incCatEncoder) self.log.info('Status:- |... Categorical to numeric feature conversion done: '+str(len(catFtrs))+' features converted') except Exception as inst: self.log.info("Error in categorical encoding "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) raise try: #scaler if self.incScaler: self.log.info("\n---------- Data Normalization has started ----------") self.incScaler = self.incScaler.partial_fit(df[numFtrs]) df[numFtrs] = self.incScaler.transform(df[numFtrs]) self.log.info( "---------- Normalization Done on Following features ----------") self.log.info(numFtrs) self.log.info('Status:- |... Normalization treatment done') except Exception as inst: self.log.info("Error in normalization "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) raise try: #outlierrem if self.incOutlierRem: self.log.info('\n---------- Performing outlier analysis ----------') df = df[df[numFtrs].apply(lambda x: False if self.apply_od_pipe(x.to_dict()) else True, axis=1)] self.log.info('\n <--- dataframe after outlier analysis --->') df.reset_index(drop=True, inplace=True) self.log.info(df.head(5)) self.log.info('Status:- |... 
Outlier treatment done') self.log.info('\n <--- shape of dataframe after outlier analysis --->') self.log.info(df.shape) except Exception as inst: self.log.info("Error in outlier treatment "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) raise #saveprofiler self.log.info('\n---------- Saving profiler models ----------') self.saveProfilerModels(deployLocation) self.log.info('<--- Profiler models saved at '+deployLocation+' --->') return df,targetFeature,missingValFtrs,numFtrs,catFtrs,self.le_dict,self.configDict,textFtrs,emptyFtrs,self.wordToNumericFeatures except Exception as inst: self.log.info("Error: dataProfiler failed "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def transformData(self, df, targetFeature, missingValFtrs,numFtrs, catFtrs, textFtrs): try: df = df.drop_duplicates(keep="first") df = df.reset_index(drop=True) df = df.replace('-', np.nan) df = df.replace('?', np.nan) text_mv_cols = list(set(missingValFtrs).intersection(set(textFtrs))) if len(text_mv_cols)>0: df[text_mv_cols] = df[text_mv_cols].fillna(value = 'NA') if 'num_fill' in self.configDict: if self.configDict['num_fill'] == 'drop': df = df.dropna(axis = 0, subset=self.allNumCols) elif self.configDict['num_fill'] == 'zero': df[self.allNumCols] = df[self.allNumCols].fillna(value = 0.0) else: for x in self.allNumCols: df[x] = df[x].fillna(value = self.configDict['num_fill'][x]) if 'cat_fill' in self.configDict: if self.configDict['cat_fill'] == 'drop': df = df.dropna(axis = 0, subset=self.allCatCols) elif self.configDict['cat_fill'] == 'zero': df[self.allCatCols] = df[self.allCatCols].fillna(value = 0.0) else: for x in self.allCatCols: df[x] = df[x].fillna(value = self.configDict['cat_fill'][x]) if self.incLabelMapping: df[targetFeature] = self.incLabelMapping.transform(df[targetFeature]) if self.incCatEncoder: transformed_data = df[catFtrs].apply(lambda row: self.apply_enc(row.to_dict(), isTrain=False), axis='columns') df[catFtrs] = transformed_data if self.incScaler: df[numFtrs] = self.incScaler.transform(df[numFtrs]) return df except Exception as inst: self.log.info("Error: DataProfiling transformation failed "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def checknumStr(self,dataframe,feature): try: dataframe[feature]=dataframe[feature].apply(lambda x: self.testStr(x)) return dataframe except: self.log.info("checknumStr failed") return dataframe #test whether the value is numeric /string def testStr(self,value): try: x=eval(value) return np.nan except: return value """ Missing values analysis Detects number of missing values in each column of dataframe """ def checksRows(self,dataframe,target_column,dataColumns): self.log.info( '\n----------- Checking Target Feature Empty Rows -----------') if self.targetColumn != '': numNa=dataframe.loc[(pd.isna(dataframe[self.targetColumn])),self.targetColumn].shape[0] self.log.info('------->No of Empty Rows in Target Fields: '+str(numNa)) if numNa >0: self.log.info('-------> Remove Empty Target Field Rows') dataframe = dataframe.dropna(axis=0, subset=[self.targetColumn]) self.log.info('-------> Remove Duplicate Rows') dataframe = 
dataframe.dropna(axis=0,how='all',subset=dataColumns) noofdplicaterows = dataframe.duplicated(keep='first').sum() dataframe = dataframe.drop_duplicates(keep="first") dataframe = dataframe.reset_index(drop=True) return dataframe,noofdplicaterows def apply_river_model(self, x, profModel): profModel.learn_one(x) return pd.Series(profModel.transform_one(x)) def apply_enc(self, x, isTrain=True): if isTrain: y = x[self.encTarget] self.incCatEncoder.learn_one(x, y) return pd.Series(self.incCatEncoder.transform_one(x)) def apply_od_pipe(self, x): score = self.incOutlierRem.score_one(x) is_anomaly = self.incOutlierRem.classify(score) self.incOutlierRem.learn_one(x) return is_anomaly #Convert Words To Number def s2n(self,value): try: x=eval(value) return x except: try: return w2n.word_to_num(value) except: return np.nan def convertWordToNumeric(self,dataframe,feature): try: dataframe[feature]=dataframe[feature].apply(lambda x: self.s2n(x)) return dataframe except Exception as inst: self.log.info("convertWordToNumeric Failed ===>"+str(inst)) return dataframe #test whether the value is numeric /string def testNum(self,value): try: x=eval(value) return x except: return np.nan ##check for numeric values in string column def checkNumeric(self,dataframe,feature): try: dataframe[feature]=dataframe[feature].apply(lambda x: self.testNum(x)) return dataframe except Exception as inst: self.log.info("checkNumeric Failed ===>"+str(inst)) return dataframe def smartFilter(self,feature,df,numericRatio): try: distinctCount = len(df[feature].unique()) numOfRows = df.shape[0] tempDataFrame=df.copy(deep=True) if(distinctCount != 1): self.log.info('-------> Feature :'+str(feature)) testDf = self.checkNumeric(tempDataFrame,feature) tempDf = testDf[feature] tempDf = tempDf.dropna() numberOfNonNullVals = tempDf.count() if(numberOfNonNullVals > int(numOfRows * numericRatio)): tempDataFrame=df.copy(deep=True) testDf = self.convertWordToNumeric(tempDataFrame,feature) tempDf = testDf[feature] tempDf = tempDf.dropna() self.log.info('----------> Numeric Status :Yes') return testDf,True else: #Wasnt't a numerical feature self.log.info('----------> Numeric Status :No') #numDf = self.checknumStr(df,feature) return df,False else: self.log.info( '\n---> No Numerics found in :' +str(feature)) return df,False except: self.log.info( '\n---> No Numerics found in :'+str(feature)) return df,False def dataFramecolType(self,dataFrame): dataFDtypes=[] try: dataColumns=list(dataFrame.columns) for i in dataColumns: dataType=dataFrame[i].dtypes dataFDtypes.append(tuple([i,str(dataType)])) return dataFDtypes except: self.log.info("error in dataFramecolyType") return dataFDtypes
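Most of the profiler's state lives in two river building blocks: StatImputer, which fills None values with a running statistic, and TargetAgg, which replaces a category with a running aggregate of the target. A minimal sketch of each in isolation (toy feature names; the exact output key produced by TargetAgg varies with the river version):

from river import stats
from river.preprocessing import StatImputer
from river.feature_extraction import TargetAgg

# running-mean imputation, learned one row at a time
imputer = StatImputer(('age', stats.Mean()))
imputer.learn_one({'age': 20})
imputer.learn_one({'age': 40})
print(imputer.transform_one({'age': None}))  # -> {'age': 30.0}

# online target encoding: category 'x' becomes the running mean of its targets
enc = TargetAgg(by='city', how=stats.Mean())
enc.learn_one({'city': 'x'}, y=1.0)
enc.learn_one({'city': 'x'}, y=3.0)
print(enc.transform_one({'city': 'x'}))      # aggregate value ~ 2.0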
riverML.py
import logging import pickle import os import sys import pandas as pd from river import stream from river.linear_model import LogisticRegression, SoftmaxRegression, LinearRegression from river.tree import ExtremelyFastDecisionTreeClassifier, HoeffdingAdaptiveTreeRegressor # from river.ensemble import AdaptiveRandomForestRegressor, AdaptiveRandomForestClassifier from river.neighbors import KNNClassifier, KNNRegressor from river.multiclass import OneVsRestClassifier from river.optim import SGD, Adam, AdaDelta, NesterovMomentum, RMSProp # from river.optim.losses import CrossEntropy, Log, MultiClassLoss, Poisson, RegressionLoss, BinaryLoss, Huber # from river.optim.initializers import Normal class riverML(object): def __init__(self): self.algoDict={'Online Logistic Regression':LogisticRegression, 'Online Softmax Regression':SoftmaxRegression, 'Online Decision Tree Classifier':ExtremelyFastDecisionTreeClassifier, 'Online KNN Classifier':KNNClassifier,'Online Linear Regression':LinearRegression, 'Online Decision Tree Regressor':HoeffdingAdaptiveTreeRegressor, 'Online KNN Regressor':KNNRegressor} self.optDict={'sgd': SGD, 'adam':Adam, 'adadelta':AdaDelta, 'nesterovmomentum':NesterovMomentum, 'rmsprop':RMSProp} self.log = logging.getLogger('eion') def getPrediction(self, model,X): testStream = stream.iter_pandas(X) preds = [] for (xi,yi) in testStream: pred = model.predict_one(xi) preds.append(pred) return pd.DataFrame(preds) def startLearn(self,problemType,algoName,params,xtrain,ytrain,noOfClasses=None): try: model = self.algoDict[algoName] params = self.parseParams(params, algoName) if problemType == 'classification': if noOfClasses>2: model = OneVsRestClassifier(classifier=model(**params)) else: model = model(**params) else: model = model(**params) trainStream = stream.iter_pandas(xtrain, ytrain) #head start for i, (xi, yi) in enumerate(trainStream): if i>100: break if yi!=None: model.learn_one(xi, yi) trainPredictedData = [] trainStream = stream.iter_pandas(xtrain, ytrain) for i, (xi, yi) in enumerate(trainStream): if yi!=None: trainPredictedData.append(model.predict_one(xi)) model.learn_one(xi, yi) trainPredictedData = pd.DataFrame(trainPredictedData) return algoName, params, model, trainPredictedData except Exception as inst: self.log.info( '\n-----> '+algoName+' failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def parseParams(self, params, algoName): try: from learner.parameters import parametersDefine paramsObj = parametersDefine() paramDict =paramsObj.paramDefine(params,method=None) paramDict = {k:v[0] for k,v in paramDict.items()} if algoName=='Online Logistic Regression' or algoName=='Online Softmax Regression' or algoName=='Online Linear Regression': opt = self.optDict[paramDict.pop('optimizer').lower()] lr = float(paramDict.pop('optimizer_lr')) paramDict['optimizer'] = opt(lr) return paramDict except Exception as inst: self.log.info( '\n-----> Parameter parsing failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
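The training loop in startLearn is prequential: after a 100-row warm-up pass, the stream is replayed and every row is first predicted and then learned, so trainPredictedData approximates out-of-sample behaviour. A stripped-down sketch of that predict-then-learn pattern (toy data; the model choice is illustrative):

import pandas as pd
from river import stream
from river.linear_model import LinearRegression

X = pd.DataFrame({'x': [1.0, 2.0, 3.0, 4.0]})
y = pd.Series([2.0, 4.0, 6.0, 8.0])
model = LinearRegression()
preds = []
for xi, yi in stream.iter_pandas(X, y):
    preds.append(model.predict_one(xi))  # predict before the label is revealed
    model.learn_one(xi, yi)              # then update the model with that row
print(preds)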
__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
*  @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
ensemble_bagging.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import time import os import sys import numpy as np import pandas as pd from sklearn import model_selection from sklearn.model_selection import train_test_split, KFold, cross_val_score from sklearn.model_selection import KFold #Classification metrics lib import logging import warnings warnings.filterwarnings('always') # "error", "ignore", "always", "default", "module" or "once" from learner.aion_matrix import aion_matrix from sklearn.preprocessing import binarize class ensemble_bagging(): def __init__(self,ensemble_params,scoreParam,MakeFP0,MakeFN0): self.ensemble_params = ensemble_params self.scoreParam=scoreParam self.MakeFP0 = MakeFP0 self.MakeFN0 = MakeFN0 self.log = logging.getLogger('eion') def add_alg2dict(self,k,v): b_dict={} b_dict[k]=v return b_dict def getSelected_algs_params(self,problemType,ensembleType,ensembleConfig): from learner.parameters import parametersDefine paramObj=parametersDefine() ensClass_algs_params={} algs_status={} for key,val in ensembleConfig.items(): for s,p in val.items(): if (s == "enable" and p == "True"): params = val['param'] params_eval = paramObj.paramDefine(params,None) params_eval = {param_key: param_value[0] for param_key, param_value in params_eval.items()} ensClass_algs_params[key]=params_eval else: pass return ensClass_algs_params def listEnsembleClassBaggingAlgs(self,ensClass_algs_params): from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier ensembleBaggingClassList=list() for key,val in ensClass_algs_params.items(): if (key == 'Logistic Regression'): lr=LogisticRegression() lr=lr.set_params(**val) ensembleBaggingClassList.append(lr) elif (key == 'Support Vector Machine'): svm=SVC() svm=svm.set_params(**val) ensembleBaggingClassList.append(svm) elif (key == 'Naive Bayes'): nb=GaussianNB() nb=nb.set_params(**val) ensembleBaggingClassList.append(nb) elif (key == 'K Nearest Neighbors'): knn=KNeighborsClassifier() knn=knn.set_params(**val) ensembleBaggingClassList.append(knn) elif (key == 'Decision Tree'): dt=DecisionTreeClassifier() dt=dt.set_params(**val) ensembleBaggingClassList.append(dt) elif (key == 'Random Forest'): rf=RandomForestClassifier() rf=rf.set_params(**val) ensembleBaggingClassList.append(rf) else: pass return ensembleBaggingClassList def listEnsembleRegBaggingAlgs(self,ensReg_algs_params): from sklearn.linear_model import Ridge from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor ensembleBaggingRegList=list() for key,val in ensReg_algs_params.items(): if (key == 'Linear Regression'): lir=LinearRegression() lir=lir.set_params(**val) ensembleBaggingRegList.append(lir) elif (key == 'Decision Tree'): dtr=DecisionTreeRegressor() dtr=dtr.set_params(**val) 
ensembleBaggingRegList.append(dtr) elif (key == 'Ridge'): ridge=Ridge() ridge=ridge.set_params(**val) ensembleBaggingRegList.append(ridge) else: ensembleBaggingRegList=[] return ensembleBaggingRegList def ensemble_bagging_classifier(self,X_train,y_train, X_test, y_test): ## New changes from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier ensemble_method = "Bagging_classifier" problemType='classification' ensembleType='bagging' model_dict=self.ensemble_params ensClass_algs_params = self.getSelected_algs_params(problemType,ensembleType,model_dict) ensembleBaggingList = self.listEnsembleClassBaggingAlgs(ensClass_algs_params) # clf_array = model_list clf_array=ensembleBaggingList # no. of base classifier num_trees = len(clf_array) # max_samples=float(max_samples) n_estimators = num_trees # random_state=seed bagging_mean={} bagging_std={} accuracy_basealgs_train={} accuracy_basealgs_test={} blable="" accuracy_score_test=0 kfold = model_selection.KFold(n_splits=10, random_state=None) bestScore=-0xFFFF scoredetails = '' threshold = -1 bestthreshold = -1 precisionscore =-1 bestprecisionscore=-1 recallscore = -1 bestrecallscore=-1 objClf = aion_matrix() if (ensemble_method == "Bagging_classifier"): #bagging ensemble of base classifier .e.g. KNeighborsClassifier base estimators, each built on random subsets of 40% of the samples and 50% of the features. for clf in clf_array: self.log.info('-----------> Ensemble Algorithm '+str(clf.__class__.__name__)) clf.fit(X_train, y_train) bagging_clf = BaggingClassifier(clf,n_estimators = num_trees, random_state=10) bagging_clf.fit(X_train, y_train) bagging_scores = cross_val_score(bagging_clf, X_train, y_train, cv=kfold,n_jobs=-1) #bagging_ensemble_t=bagging_clf.fit(X_train, y_train) if not X_test.empty: bag_predict=bagging_clf.predict(X_test) accuracy_score_test = objClf.get_score(self.scoreParam,y_test,bag_predict) else: accuracy_score_test = bagging_scores MakeFP0 = False MakeFN0 = False if self.MakeFP0: self.log.info('-------- Ensemble: Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = objClf.check_threshold(bagging_clf,X_train,y_train,threshold_range,'FP','') MakeFP0 = True self.log.info('-------- Calculate Threshold for FP End-------') if self.MakeFN0: self.log.info('-------- Ensemble: Calculate Threshold for FN Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = objClf.check_threshold(bagging_clf,X_train,y_train,threshold_range,'FN','') MakeFN0 = True self.log.info('-------- Calculate Threshold for FN End-------') if threshold != -1: if not X_test.empty: predictedData = bagging_clf.predict_proba(X_test) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) #bug 12437 accuracy_score_test = objClf.get_score(self.scoreParam,y_test,predictedData) status,bscore,bthres,brscore,bpscore = objClf.getBestModel(MakeFP0,MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,accuracy_score_test,bestScore) if status: bestScore =bscore bestModel =bagging_clf.__class__.__name__ bestEstimator=bagging_clf bestthreshold = bthres bestBaseModel = clf.__class__.__name__ bestrecallscore = brscore bestprecisionscore = bpscore else: pass best_alg_name=bestEstimator.__class__.__name__ self.log.info('-----------> Best Bagging 
Classifier Model '+str(bestBaseModel)) self.log.info('-----------> Best Score '+str(bestScore)) # self.log.info('-----------> Threshold '+str(bestthreshold)) #bug 12438 if bestthreshold != -1: if not X_test.empty: predictedData_test = bestEstimator.predict_proba(X_test) predictedData_test = binarize(predictedData_test[:,1].reshape(-1, 1),threshold=bestthreshold) #bug 12437 predictedData_train = bestEstimator.predict_proba(X_train) predictedData_train = binarize(predictedData_train[:,1].reshape(-1, 1),threshold=bestthreshold) #bug 12437 else: if not X_test.empty: predictedData_test = bestEstimator.predict(X_test) predictedData_train = bestEstimator.predict(X_train) return bestEstimator,bestEstimator.get_params(),bestScore,best_alg_name,bestthreshold,bestprecisionscore,bestrecallscore def ensemble_bagging__regressor(self,X_train,y_train, X_test, y_test): from sklearn.ensemble import BaggingRegressor ensemble_method='Bagging_regressor' problemType='regression' ensembleType='bagging' model_dict=self.ensemble_params ensReg_algs_params = self.getSelected_algs_params(problemType,ensembleType,model_dict) ensembleBaggingList = self.listEnsembleRegBaggingAlgs(ensReg_algs_params) scoredetails = '' aion_matrixobj = aion_matrix() reg_array = ensembleBaggingList num_trees = len(reg_array) #self.log.info(num_trees) # max_samples=float(max_samples) n_estimators = num_trees r_state=10 bestModel='' bestParams={} bestScore=-sys.float_info.max #extension of bugfix 11656 objClf = aion_matrix() for reg in reg_array: self.log.info('-----------> Ensemble Algorithm '+str(reg.__class__.__name__)) nmodel=reg.fit(X_train, y_train) model = reg.__class__.__name__ estimator = BaggingRegressor(base_estimator=reg, random_state=r_state) bagging_ensemble_t=estimator.fit(X_train, y_train) predictedData = estimator.predict(X_test) score = objClf.get_score(self.scoreParam,y_test,predictedData) if self.scoreParam == "r2": if score > bestScore: bestScore =score bestModel =model bestEstimator=estimator else: if abs(score) < bestScore or bestScore == -sys.float_info.max: #extension of bugfix 11656 bestScore =abs(score) bestModel =model bestEstimator=estimator best_alg_name=bestEstimator.__class__.__name__ self.log.info('-----------> Best Ensemble Algorithm '+str(bestModel)) return bestEstimator,bestEstimator.get_params(),bestScore,best_alg_name
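A self-contained sketch of the core pattern in ensemble_bagging_classifier above: wrap a configured base estimator in a BaggingClassifier and score it with k-fold cross validation. Synthetic data stands in for AION's config plumbing; note that cross_val_score returns one score per fold, so the fold scores are averaged before any model comparison.

from sklearn.datasets import make_classification
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import KFold, cross_val_score
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=300, n_features=10, random_state=0)
base = DecisionTreeClassifier(max_depth=3)
# One bag of 10 copies of the base estimator, each fit on a bootstrap sample
bagging_clf = BaggingClassifier(base, n_estimators=10, random_state=10)
kfold = KFold(n_splits=10)
scores = cross_val_score(bagging_clf, X, y, cv=kfold, n_jobs=-1)
print(scores.mean())  # average fold score used for comparison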
ensemble_voting.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import numpy as np import logging import warnings from sklearn.ensemble import VotingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import Ridge from sklearn.preprocessing import binarize from sklearn.ensemble import VotingRegressor from sklearn.svm import SVC from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from learner.aion_matrix import aion_matrix warnings.filterwarnings('always') class ensemble_voting(): def __init__(self,ensemble_params,scoreParam): self.ensemble_params = ensemble_params self.scoreParam=scoreParam self.final_estimator_r='' self.final_estimator_c='' self.log = logging.getLogger('eion') ''' Read the aion config "Ensemble-Voting", parse the algorithm and associated params based on enable or True status.Not used now ''' def getSelected_algs_params(self,problemType,ensembleType,ensembleConfig): from learner.parameters import parametersDefine paramObj=parametersDefine() ensClass_algs_params={} # algs_status={} for key,val in ensembleConfig.items(): for s,p in val.items(): if (s == "enable" and p == "True"): params = val['param'] params_eval = paramObj.paramDefine(params,None) params_eval = {param_key: param_value[0] for param_key, param_value in params_eval.items()} ensClass_algs_params[key]=params_eval else: pass return ensClass_algs_params ''' To make array of voting algorithm based on user config list. Not used now, in future if needed similar line with bagging ensemble, please use this. ''' def listEnsembleClassVotingAlgs(self,ensClass_algs_params): ensembleVotingClassList=list() for key,val in ensClass_algs_params.items(): if (key == 'Logistic Regression'): lr=LogisticRegression() lr=lr.set_params(**val) ensembleVotingClassList.append(lr) elif (key == 'Support Vector Machine'): svm=SVC() svm=svm.set_params(**val) ensembleVotingClassList.append(svm) elif (key == 'Naive Bayes'): nb=GaussianNB() nb=nb.set_params(**val) ensembleVotingClassList.append(nb) elif (key == 'K Nearest Neighbors'): knn=KNeighborsClassifier() knn=knn.set_params(**val) ensembleVotingClassList.append(knn) elif (key == 'Decision Tree'): dt=DecisionTreeClassifier() dt=dt.set_params(**val) ensembleVotingClassList.append(dt) elif (key == 'Random Forest'): rf=RandomForestClassifier() rf=rf.set_params(**val) ensembleVotingClassList.append(rf) else: ## Algorithm not found in config, so forming empty alg list. If needs, make list with default alg. ensembleVotingClassList=[] pass return ensembleVotingClassList ''' To make array of voting regression algorithm based on user config list. Not used now, in future if needed similar line with bagging ensemble, please use this. 
''' def listEnsembleRegVotingAlgs(self,ensReg_algs_params): ensembleVotingRegList=list() for key,val in ensReg_algs_params.items(): if (key == 'Linear Regression'): lir=LinearRegression() lir=lir.set_params(**val) ensembleVotingRegList.append(lir) elif (key == 'Decision Tree'): dtr=DecisionTreeRegressor() dtr=dtr.set_params(**val) ensembleVotingRegList.append(dtr) elif (key == 'Ridge'): ridge=Ridge() ridge=ridge.set_params(**val) ensembleVotingRegList.append(ridge) else: ## Algorithm not found in config, so forming empty alg list. If needs, make list with default alg. ensembleVotingRegList=[] return ensembleVotingRegList def ensemble_voting_classifier(self,X_train,y_train, X_test, y_test,MakeFP0,MakeFN0,modelList): #bug 12437 status='ERROR' model=None estimator=None score=None params=None threshold = -1 precisionscore =-1 recallscore = -1 objClf = aion_matrix() try: lr = LogisticRegression(solver='lbfgs',random_state=1,max_iter=200) rf = RandomForestClassifier(random_state=1) gnb = GaussianNB() svc = SVC(probability=True) #Need to keep probability=True, because cross_val_score,predict_proba fn calls knn=KNeighborsClassifier(n_neighbors=5) base_estimators = [] if 'Logistic Regression' in modelList: base_estimators.append(('LogisticRegression', lr)) self.log.info('-------- Ensemble: Logistic Regression-------') if 'Random Forest' in modelList: base_estimators.append(('RandomForestClassifier', rf)) self.log.info('-------- Ensemble: Random Forest-------') if 'Naive Bayes' in modelList: base_estimators.append(('GaussianNB', gnb)) self.log.info('-------- Ensemble: Naive Bayes-------') if 'Support Vector Machine' in modelList: self.log.info('-------- Ensemble: Support Vector Machine-------') base_estimators.append(('SVC', svc)) if 'K Nearest Neighbors' in modelList: base_estimators.append(('KNeighborsClassifier', knn)) self.log.info('-------- Ensemble: K Nearest Neighbors-------') if len(base_estimators) == 0: self.log.info('-------- Ensemble Voting is only supported for Logistic Regression, Random Forest Classifier, Naive Bayes, SVM and KNN -------') status = "UNSUPPORTED" return status, estimator,params,score,model,threshold,precisionscore,recallscore eclf1 = VotingClassifier(base_estimators, voting='soft') eclf1.fit(X_train, y_train) y_predict = eclf1.predict(X_test) score = objClf.get_score(self.scoreParam,y_test,y_predict) self.log.info('-------- Ensemble (VoteClassifier) Soft Score:'+str(score)) if MakeFP0: self.log.info('-------- Ensemble: Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = objClf.check_threshold(eclf1,X_train,y_train,threshold_range,'FP','') self.log.info('-------- Calculate Threshold for FP End-------') elif MakeFN0: self.log.info('-------- Ensemble: Calculate Threshold for FN Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = objClf.check_threshold(eclf1,X_train,y_train,threshold_range,'FN','') self.log.info('-------- Calculate Threshold for FN End-------') if threshold != -1: predictedData = eclf1.predict_proba(X_test) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) #bug 12437 score = objClf.get_score(self.scoreParam,y_test,predictedData) status = 'SUCCESS' model =eclf1.__class__.__name__ estimator=eclf1 params = estimator.get_params() #bug 12437 - Removed ensemble hard voting as predict_proba in 
the later stages will break except Exception as Inst: #bug 12437 self.log.info('--------- Error in Ensemble Voting ---------\n') self.log.info(str(Inst)) return status,estimator,params,score,model,threshold,precisionscore,recallscore def ensemble_voting__regressor(self,X_train,y_train, X_test, y_test,modelList): scoredetails = '' vr_predict=None vr_model=None try: lr = LinearRegression() rfr = RandomForestRegressor(n_estimators=10, random_state=1) dtr=DecisionTreeRegressor() base_estimators = [] if 'Linear Regression' in modelList: base_estimators.append(('LinearRegression', lr)) if 'Decision Tree' in modelList: base_estimators.append(('DecisionTreeRegressor', dtr)) if 'Random Forest' in modelList: base_estimators.append(('RandomForestRegressor', rfr)) if len(base_estimators) == 0: base_estimators = [('LinearRegression', lr), ('RandomForestRegressor', rfr),('DecisionTreeRegressor', dtr)] voting_reg = VotingRegressor(base_estimators) vr_model=voting_reg.fit(X_train,y_train) vr_predict=voting_reg.predict(X_test) best_vr_alg=voting_reg.__class__.__name__ self.log.info('-----------> Voting regression Model '+str(best_vr_alg)) except Exception as e: self.log.info("voting regression Exception info: \n") self.log.info(e) aion_matrixobj = aion_matrix() score = aion_matrixobj.get_score(self.scoreParam,y_test,vr_predict) return voting_reg,voting_reg.get_params(),score,best_vr_alg
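For reference, a minimal sketch of the soft-voting plus threshold-binarization flow used in ensemble_voting_classifier above; the 0.7 threshold is purely illustrative (the real code searches a threshold range via check_threshold):

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import binarize

X, y = make_classification(n_samples=300, n_features=8, random_state=1)
eclf = VotingClassifier(
    [('LogisticRegression', LogisticRegression(solver='lbfgs', max_iter=200)),
     ('RandomForestClassifier', RandomForestClassifier(random_state=1)),
     ('GaussianNB', GaussianNB())],
    voting='soft')  # soft voting exposes predict_proba, which thresholding needs
eclf.fit(X, y)
proba = eclf.predict_proba(X)[:, 1]
preds = binarize(proba.reshape(-1, 1), threshold=0.7)  # illustrative threshold
print(preds[:5].ravel())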
__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
*  @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
ensemble_stacking.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import numpy as np #Classification metrics lib import logging import warnings from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.preprocessing import binarize from sklearn.svm import SVC from sklearn.ensemble import StackingClassifier from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import StackingRegressor from sklearn.svm import LinearSVR from sklearn.linear_model import RidgeCV from sklearn.linear_model import LassoCV from learner.aion_matrix import aion_matrix warnings.filterwarnings('always') # "error", "ignore", "always", "default", "module" or "once" class ensemble_stacking(): def __init__(self,ensemble_params,scoreParam): self.ensemble_params = ensemble_params self.scoreParam=scoreParam self.final_estimator_r='' self.final_estimator_c='' self.log = logging.getLogger('eion') ## Read the aion config "Ensemble-Stacking", parse the algorithm and associated params based on enable or True status. def getSelected_algs_params(self,problemType,ensembleType,ensembleConfig): from learner.parameters import parametersDefine paramObj=parametersDefine() ensClass_algs_params={} # algs_status={} for key,val in ensembleConfig.items(): for s,p in val.items(): if (s == "enable" and p == "True"): params = val['param'] params_eval = paramObj.paramDefine(params,None) params_eval = {param_key: param_value[0] for param_key, param_value in params_eval.items()} ensClass_algs_params[key]=params_eval else: pass return ensClass_algs_params ## To make array of stacking algorithm based on user config list. Not used now, in future if needed similar line with bagging ensemble, please use this. def listEnsembleClassStackingAlgs(self,ensClass_algs_params): ensembleBaggingClassList=list() for key,val in ensClass_algs_params.items(): # print(key) if (key == 'Logistic Regression'): lr=LogisticRegression() lr=lr.set_params(**val) ensembleBaggingClassList.append(lr) elif (key == 'Support Vector Machine'): svm=SVC() svm=svm.set_params(**val) ensembleBaggingClassList.append(svm) elif (key == 'Naive Bayes'): nb=GaussianNB() nb=nb.set_params(**val) ensembleBaggingClassList.append(nb) elif (key == 'K Nearest Neighbors'): knn=KNeighborsClassifier() knn=knn.set_params(**val) ensembleBaggingClassList.append(knn) elif (key == 'Decision Tree'): dt=DecisionTreeClassifier() dt=dt.set_params(**val) ensembleBaggingClassList.append(dt) elif (key == 'Random Forest'): rf=RandomForestClassifier() rf=rf.set_params(**val) ensembleBaggingClassList.append(rf) else: ensembleBaggingClassList=[] pass return ensembleBaggingClassList ## To make array of stacking regression algorithm based on user config list. Not used now, in future if needed similar line with bagging ensemble, please use this. 
def listEnsembleRegStackingAlgs(self,ensReg_algs_params): ensembleBaggingRegList=list() for key,val in ensReg_algs_params.items(): if (key == 'LinearSVR'): lir=LinearSVR() lir=lir.set_params(**val) ensembleBaggingRegList.append(lir) elif (key == 'LinearRegression'): lr=LinearRegression() lr=lr.set_params(**val) ensembleBaggingRegList.append(lr) elif (key == 'LassoCV'): lcv=LassoCV() lcv=lcv.set_params(**val) ensembleBaggingRegList.append(lcv) elif (key == 'RandomForestRegressor'): rfr=RandomForestRegressor() rfr=rfr.set_params(**val) ensembleBaggingRegList.append(rfr) elif (key == 'RidgeCV'): ridge=RidgeCV() ridge=ridge.set_params(**val) ensembleBaggingRegList.append(ridge) else: ## NO algorithms found in configuration settings, instead of sending empty array,we can add any one of algorithms. ensembleBaggingRegList=[] return ensembleBaggingRegList def extract_params(self,dict): self.dict=dict for k,v in self.dict.items(): return k,v def stacking_params(self): for k,v in self.ensemble_params.items(): try: if (k == "max_features_percentage"): max_features_percentage=float(v) elif (k == "max_samples"): max_samples=float(v) elif (k == "seed"): seed=int(v) elif (k == "final_estimator_stack_c"): final_estimator_c=str(v) elif (k == "final_estimator_stack_r"): final_estimator_r=str(v) else: self.log.info("Invalid Param in ensemble advanced configuration.\n") except Exception as e: self.log.info("\n Ensemble config param parsing error"+str(e)) continue return final_estimator_c,final_estimator_r,seed,max_samples,max_features_percentage def ensemble_stacking_classifier(self,X_train,y_train, X_test, y_test,MakeFP0,MakeFN0,modelList): final_estimator_c,final_estimator_r,seed,max_samples,max_features_percentage= self.stacking_params() final_estimator_c="" final_estimator=final_estimator_c scoredetails='' lr = LogisticRegression(solver='lbfgs',random_state=1,max_iter=200) rf = RandomForestClassifier(random_state=2) gnb = GaussianNB() svc = SVC(probability=True) #Need to keep probability=True, because of cross_val_score,predict_proba fn calls knn=KNeighborsClassifier(n_neighbors=5) try: if (final_estimator == 'LogisticRegression'): final_estimator_a=lr elif (final_estimator == 'RandomForestClassifier'): final_estimator_a=rf elif (final_estimator == 'GaussianNB'): final_estimator_a=gnb elif (final_estimator == 'SVC'): final_estimator_a=svc elif (final_estimator == 'KNeighborsClassifier'): final_estimator_a=knn else: final_estimator_a=lr except Exception as e: final_estimator_a=lr self.log.info("Given stacking regression final estimator algorithm issue, using default one (LogisticRegression) as final_estimator now.\n") self.log.info(e) #stacking estimators base_estimators = [] if 'Logistic Regression' in modelList: base_estimators.append(('LogisticRegression', lr)) if 'Random Forest' in modelList: base_estimators.append(('RandomForestClassifier', rf)) if 'Naive Bayes' in modelList: base_estimators.append(('GaussianNB', gnb)) if 'Support Vector Machine' in modelList: base_estimators.append(('SVC', svc)) if 'K Nearest Neighbors' in modelList: base_estimators.append(('KNeighborsClassifier', knn)) if len(base_estimators) == 0: base_estimators = [('LogisticRegression', lr),('RandomForestClassifier', rf),('GaussianNB', gnb),('SVC', svc),('KNeighborsClassifier', knn)] stacking_c = StackingClassifier(estimators=base_estimators, final_estimator=final_estimator_a) stacking_c.fit(X_train, y_train) y_predict=stacking_c.predict(X_test) objClf = aion_matrix() accuracy_score_test = 
objClf.get_score(self.scoreParam,y_test,y_predict) MakeFP0 = False MakeFN0 = False threshold = -1 recallscore = -1 precisionscore =-1 if MakeFP0: self.log.info('-------- Ensemble: Calculate Threshold for FP Start-------') startRange = 0.0 endRange = 1.0 stepsize = 0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = objClf.check_threshold(stacking_c,X_train,y_train,threshold_range,'FP','') MakeFP0 = True self.log.info('-------- Calculate Threshold for FP End-------') elif MakeFN0: self.log.info('-------- Ensemble: Calculate Threshold for FN Start-------') startRange = 1.0 endRange = 0.0 stepsize = -0.01 threshold_range = np.arange(startRange,endRange,stepsize) threshold,precisionscore,recallscore = objClf.check_threshold(stacking_c,X_train,y_train,threshold_range,'FN','') MakeFN0 = True self.log.info('-------- Calculate Threshold for FN End-------') if threshold != -1: predictedData = stacking_c.predict_proba(X_test) predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold) #bug 12437 accuracy_score_test = objClf.get_score(self.scoreParam,y_test,predictedData) best_alg_stacking=stacking_c.__class__.__name__ self.log.info('-----------> Best Stacking Classifier Model '+str(best_alg_stacking)) self.log.info('-----------> Best Score '+str(accuracy_score_test)) return stacking_c,stacking_c.get_params(),accuracy_score_test,best_alg_stacking,threshold,precisionscore,recallscore def ensemble_stacking__regressor(self,X_train,y_train, X_test, y_test,modelList): final_estimator_c,final_estimator_r,seed,max_samples,max_features_percentage= self.stacking_params() final_estimator=final_estimator_r final_estimator_a=None scoredetails='' lr=LinearRegression() rcv=RidgeCV() svr=LinearSVR() lcv=LassoCV() rf=RandomForestRegressor(random_state=42) try: if (final_estimator == 'LinearRegression'): final_estimator_a=lr if (final_estimator == 'RidgeCV'): final_estimator_a=rcv elif (final_estimator == 'LinearSVR'): final_estimator_a=svr elif (final_estimator == 'LassoCV'): final_estimator_a=lcv elif (final_estimator == 'RandomForestRegressor'): final_estimator_a=rf else: #default is RidgeCV final_estimator_a=rcv except Exception as e: self.log.info("stacking regression Exception info: \n") self.log.info(e) final_estimator_a=rcv base_estimators = [] if 'Linear Regression' in modelList: base_estimators.append(('LinearRegression', lr)) if 'Ridge' in modelList: base_estimators.append(('RidgeCV', rcv)) if 'LinearSVR' in modelList: base_estimators.append(('LinearSVR', svr)) if 'Lasso' in modelList: base_estimators.append(('LassoCV', lcv)) if 'Random Forest' in modelList: base_estimators.append(('RandomForestRegressor', rf)) if len(base_estimators) == 0: base_estimators = [('LinearRegression', lr),('RidgeCV', rcv),('LinearSVR', svr),('LassoCV', lcv),('RandomForestRegressor', rf)] self.log.info("Stacking Base Alogs :"+str(base_estimators)) self.log.info("Final Estimator :"+final_estimator) stacking_regressor = StackingRegressor(estimators=base_estimators,final_estimator=final_estimator_a) stacking_r_model=stacking_regressor.fit(X_train, y_train) stacking_rpredict=stacking_regressor.predict(X_test) best_stacking_alg=stacking_regressor.__class__.__name__ #Accuracy accuracy_score_best=stacking_regressor.score(X_test, y_test) aion_matrixobj = aion_matrix() score = aion_matrixobj.get_score(self.scoreParam,y_test,stacking_rpredict) return stacking_regressor,stacking_regressor.get_params(),score,best_stacking_alg
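A minimal sketch of the stacking layout built in ensemble_stacking_classifier above: the configured models become level-0 estimators, and the chosen final estimator (LogisticRegression by default) learns from their predictions:

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC

X, y = make_classification(n_samples=300, n_features=8, random_state=2)
base_estimators = [
    ('RandomForestClassifier', RandomForestClassifier(random_state=2)),
    ('GaussianNB', GaussianNB()),
    ('SVC', SVC(probability=True)),  # probability=True, as in the class above
]
stacking_c = StackingClassifier(estimators=base_estimators,
                                final_estimator=LogisticRegression(solver='lbfgs', max_iter=200))
stacking_c.fit(X, y)
print(stacking_c.predict(X[:5]))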
__init__.py
from .genetic_optimization import GeneticOptimizationCV
genetic_optimization.py
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
import warnings
warnings.filterwarnings('ignore')
from genetic_selection import GeneticSelectionCV

def GeneticOptimizationCV(model, genetic_params, n_iter, scoring, cv):
    # Wraps GeneticSelectionCV: AION's iteration count maps to GA generations
    n_generations = n_iter
    estimator = model
    selector = GeneticSelectionCV(estimator, cv=cv, **genetic_params,
                                  n_generations=n_generations, scoring=scoring)
    return selector
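An illustrative usage sketch for the wrapper above. The keyword names inside genetic_params follow the sklearn-genetic package's GeneticSelectionCV; the values here are arbitrary placeholders, not tuned defaults:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=15, n_informative=4, random_state=0)
genetic_params = {'n_population': 20, 'crossover_proba': 0.5, 'mutation_proba': 0.2}
selector = GeneticOptimizationCV(LogisticRegression(max_iter=200), genetic_params,
                                 n_iter=5, scoring='accuracy', cv=3)
selector = selector.fit(X, y)
print(selector.support_)  # boolean mask over the 15 features; True = selected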
pipelines.py
import itertools import logging from typing import Optional, Dict, Union from nltk import sent_tokenize import torch from transformers import( AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer, ) logger = logging.getLogger(__name__) class QGPipeline: """Poor man's QG pipeline""" def __init__( self, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, ans_model: PreTrainedModel, ans_tokenizer: PreTrainedTokenizer, qg_format: str, use_cuda: bool ): self.model = model self.tokenizer = tokenizer self.ans_model = ans_model self.ans_tokenizer = ans_tokenizer self.qg_format = qg_format self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu" self.model.to(self.device) if self.ans_model is not self.model: self.ans_model.to(self.device) assert self.model.__class__.__name__ in ["T5ForConditionalGeneration", "BartForConditionalGeneration"] if "T5ForConditionalGeneration" in self.model.__class__.__name__: self.model_type = "t5" else: self.model_type = "bart" def __call__(self, inputs: str): inputs = " ".join(inputs.split()) sents, answers = self._extract_answers(inputs) flat_answers = list(itertools.chain(*answers)) if len(flat_answers) == 0: return [] if self.qg_format == "prepend": qg_examples = self._prepare_inputs_for_qg_from_answers_prepend(inputs, answers) else: qg_examples = self._prepare_inputs_for_qg_from_answers_hl(sents, answers) qg_inputs = [example['source_text'] for example in qg_examples] questions = self._generate_questions(qg_inputs) output = [{'answer': example['answer'], 'question': que} for example, que in zip(qg_examples, questions)] return output def _generate_questions(self, inputs): inputs = self._tokenize(inputs, padding=True, truncation=True) outs = self.model.generate( input_ids=inputs['input_ids'].to(self.device), attention_mask=inputs['attention_mask'].to(self.device), max_length=32, num_beams=4, ) questions = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in outs] return questions def _extract_answers(self, context): sents, inputs = self._prepare_inputs_for_ans_extraction(context) inputs = self._tokenize(inputs, padding=True, truncation=True) outs = self.ans_model.generate( input_ids=inputs['input_ids'].to(self.device), attention_mask=inputs['attention_mask'].to(self.device), max_length=32, ) dec = [self.ans_tokenizer.decode(ids, skip_special_tokens=False) for ids in outs] answers = [item.split('<sep>') for item in dec] answers = [i[:-1] for i in answers] return sents, answers def _tokenize(self, inputs, padding=True, truncation=True, add_special_tokens=True, max_length=512 ): inputs = self.tokenizer.batch_encode_plus( inputs, max_length=max_length, add_special_tokens=add_special_tokens, truncation=truncation, padding="max_length" if padding else False, pad_to_max_length=padding, return_tensors="pt" ) return inputs def _prepare_inputs_for_ans_extraction(self, text): sents = sent_tokenize(text) inputs = [] for i in range(len(sents)): source_text = "extract answers:" for j, sent in enumerate(sents): if i == j: sent = "<hl> %s <hl>" % sent source_text = "%s %s" % (source_text, sent) source_text = source_text.strip() if self.model_type == "t5": source_text = source_text + " </s>" inputs.append(source_text) return sents, inputs def _prepare_inputs_for_qg_from_answers_hl(self, sents, answers): inputs = [] for i, answer in enumerate(answers): if len(answer) == 0: continue for answer_text in answer: sent = sents[i] sents_copy = sents[:] answer_text = answer_text.strip() ans_start_idx = 0 # ans_start_idx = 
sent.index(answer_text) # if answer_text in sent: # ans_start_idx = sent.index(answer_text) # else: # continue sent = f"{sent[:ans_start_idx]} <hl> {answer_text} <hl> {sent[ans_start_idx + len(answer_text): ]}" sents_copy[i] = sent source_text = " ".join(sents_copy) source_text = f"generate question: {source_text}" if self.model_type == "t5": source_text = source_text + " </s>" inputs.append({"answer": answer_text, "source_text": source_text}) return inputs def _prepare_inputs_for_qg_from_answers_prepend(self, context, answers): flat_answers = list(itertools.chain(*answers)) examples = [] for answer in flat_answers: source_text = f"answer: {answer} context: {context}" if self.model_type == "t5": source_text = source_text + " </s>" examples.append({"answer": answer, "source_text": source_text}) return examples class MultiTaskQAQGPipeline(QGPipeline): def __init__(self, **kwargs): super().__init__(**kwargs) def __call__(self, inputs: Union[Dict, str]): if type(inputs) is str: # do qg return super().__call__(inputs) else: # do qa return self._extract_answer(inputs["question"], inputs["context"]) def _prepare_inputs_for_qa(self, question, context): source_text = f"question: {question} context: {context}" if self.model_type == "t5": source_text = source_text + " </s>" return source_text def _extract_answer(self, question, context): source_text = self._prepare_inputs_for_qa(question, context) inputs = self._tokenize([source_text], padding=False) outs = self.model.generate( input_ids=inputs['input_ids'].to(self.device), attention_mask=inputs['attention_mask'].to(self.device), max_length=16, ) answer = self.tokenizer.decode(outs[0], skip_special_tokens=True) return answer class E2EQGPipeline: def __init__( self, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, use_cuda: bool ) : self.model = model self.tokenizer = tokenizer self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu" self.model.to(self.device) assert self.model.__class__.__name__ in ["T5ForConditionalGeneration", "BartForConditionalGeneration"] if "T5ForConditionalGeneration" in self.model.__class__.__name__: self.model_type = "t5" else: self.model_type = "bart" self.default_generate_kwargs = { "max_length": 256, "num_beams": 4, "length_penalty": 1.5, "no_repeat_ngram_size": 3, "early_stopping": True, } def __call__(self, context: str, **generate_kwargs): inputs = self._prepare_inputs_for_e2e_qg(context) # TODO: when overrding default_generate_kwargs all other arguments need to be passsed # find a better way to do this if not generate_kwargs: generate_kwargs = self.default_generate_kwargs input_length = inputs["input_ids"].shape[-1] # max_length = generate_kwargs.get("max_length", 256) # if input_length < max_length: # logger.warning( # "Your max_length is set to {}, but you input_length is only {}. You might consider decreasing max_length manually, e.g. 
summarizer('...', max_length=50)".format( # max_length, input_length # ) # ) outs = self.model.generate( input_ids=inputs['input_ids'].to(self.device), attention_mask=inputs['attention_mask'].to(self.device), **generate_kwargs ) prediction = self.tokenizer.decode(outs[0], skip_special_tokens=True) questions = prediction.split("<sep>") questions = [question.strip() for question in questions[:-1]] return questions def _prepare_inputs_for_e2e_qg(self, context): source_text = f"generate questions: {context}" if self.model_type == "t5": source_text = source_text + " </s>" inputs = self._tokenize([source_text], padding=False) return inputs def _tokenize( self, inputs, padding=True, truncation=True, add_special_tokens=True, max_length=512 ): inputs = self.tokenizer.batch_encode_plus( inputs, max_length=max_length, add_special_tokens=add_special_tokens, truncation=truncation, padding="max_length" if padding else False, pad_to_max_length=padding, return_tensors="pt" ) return inputs SUPPORTED_TASKS = { "question-generation": { "impl": QGPipeline, "default": { "model": "valhalla/t5-small-qg-hl", "ans_model": "valhalla/t5-small-qa-qg-hl", } }, "multitask-qa-qg": { "impl": MultiTaskQAQGPipeline, "default": { "model": "valhalla/t5-small-qa-qg-hl", } }, "e2e-qg": { "impl": E2EQGPipeline, "default": { "model": "valhalla/t5-small-e2e-qg", } } } def pipeline( task: str, model: Optional = None, tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None, qg_format: Optional[str] = "highlight", ans_model: Optional = None, ans_tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None, use_cuda: Optional[bool] = True, **kwargs, ): # Retrieve the task if task not in SUPPORTED_TASKS: raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys()))) targeted_task = SUPPORTED_TASKS[task] task_class = targeted_task["impl"] # Use default model/config/tokenizer for the task if no model is provided if model is None: model = targeted_task["default"]["model"] # Try to infer tokenizer from model or config name (if provided as str) if tokenizer is None: if isinstance(model, str): tokenizer = model else: # Impossible to guest what is the right tokenizer here raise Exception( "Impossible to guess which tokenizer to use. " "Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer." ) # Instantiate tokenizer if needed if isinstance(tokenizer, (str, tuple)): if isinstance(tokenizer, tuple): # For tuple we have (tokenizer name, {kwargs}) tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1]) else: tokenizer = AutoTokenizer.from_pretrained(tokenizer) # Instantiate model if needed if isinstance(model, str): model = AutoModelForSeq2SeqLM.from_pretrained(model) if task == "question-generation": if ans_model is None: # load default ans model ans_model = targeted_task["default"]["ans_model"] ans_tokenizer = AutoTokenizer.from_pretrained(ans_model) ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model) else: # Try to infer tokenizer from model or config name (if provided as str) if ans_tokenizer is None: if isinstance(ans_model, str): ans_tokenizer = ans_model else: # Impossible to guest what is the right tokenizer here raise Exception( "Impossible to guess which tokenizer to use. " "Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer." 
) # Instantiate tokenizer if needed if isinstance(ans_tokenizer, (str, tuple)): if isinstance(ans_tokenizer, tuple): # For tuple we have (tokenizer name, {kwargs}) ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer[0], **ans_tokenizer[1]) else: ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer) if isinstance(ans_model, str): ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model) if task == "e2e-qg": return task_class(model=model, tokenizer=tokenizer, use_cuda=use_cuda) elif task == "question-generation": return task_class(model=model, tokenizer=tokenizer, ans_model=ans_model, ans_tokenizer=ans_tokenizer, qg_format=qg_format, use_cuda=use_cuda) else: return task_class(model=model, tokenizer=tokenizer, ans_model=model, ans_tokenizer=tokenizer, qg_format=qg_format, use_cuda=use_cuda)
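A short usage sketch for the pipeline factory above. The model identifiers are the defaults wired into SUPPORTED_TASKS; fetching them requires the Hugging Face hub to be reachable and nltk's punkt tokenizer to be available. Sample outputs are indicative only:

nlp = pipeline("question-generation")  # valhalla/t5-small-qg-hl + valhalla/t5-small-qa-qg-hl
print(nlp("Python is a programming language created by Guido van Rossum."))
# e.g. [{'answer': 'Guido van Rossum', 'question': 'Who created Python?'}]

qa_qg = pipeline("multitask-qa-qg")
print(qa_qg({"question": "Who created Python?",
             "context": "Python is a programming language created by Guido van Rossum."}))
# e.g. 'Guido van Rossum'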
DRL_train.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import numpy as np import pandas as pd import time import sys import logging from reinforcement.agents.ddqn import TrainDDQN from reinforcement.agents.dqn import TrainDQN from reinforcement.utils import rounded_dict from tensorflow.keras.layers import Dense, Dropout from sklearn.model_selection import train_test_split from learner.machinelearning import machinelearning from learner.aion_matrix import aion_matrix from reinforcement.metrics import network_predictions from learner.machinelearning import machinelearning os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # CPU is faster than GPU on structured data #def TrainRL(input_csv_file, model_save_path, rl_config, RL_Algo_Name): class ReinformentLearning(): def __init__(self,rlConfig,scoreParam,modelType): self.rl_config= rlConfig self.scoreParam = scoreParam self.log = logging.getLogger('eion') self.modelType = modelType def TrainRL(self,xtrain,ytrain,xtest,ytest,algorithm,deployLocation): try: scoredetails = '' X_train, xval, y_train, yval = train_test_split(xtrain, ytrain, test_size=0.2, stratify=ytrain) X_train = np.array(X_train) y_train = np.array(y_train) xval = np.array(xval) yval = np.array(yval) valueCount=ytrain.value_counts() categoryCountList=valueCount.tolist() xtest = np.array(xtest) ytest = np.array(ytest) objClf = aion_matrix() episodes = self.rl_config['episodes'] # Total number of episodes warmup_steps = self.rl_config['warmup_steps'] # Amount of warmup steps to collect data with random policy memory_length = warmup_steps # Max length of the Replay Memory batch_size = self.rl_config['batch_size'] collect_steps_per_episode = self.rl_config['collect_steps_per_episode'] collect_every = self.rl_config['collect_every'] target_update_period = self.rl_config['target_update_period'] # Period to overwrite the target Q-network with the default Q-network target_update_tau = self.rl_config['target_update_tau'] # Soften the target model update n_step_update = self.rl_config['n_step_update'] learning_rate = self.rl_config['learning_rate'] # Learning rate gamma = self.rl_config['gamma'] # Discount factor min_epsilon = self.rl_config['min_epsilon'] # Minimal and final chance of choosing random action decay_episodes = episodes // 10 # Number of episodes to decay from 1.0 to `min_epsilon`` layers = [Dense(128, activation="relu"), #need modification Dense(64, activation="relu"), Dense(32, activation="relu"), Dense(len(np.unique(y_train)), activation=None)] logFilePath=os.path.join(deployLocation,'log') if algorithm == "DQN": start = time.time() modelName = "DQN" model_save_path = os.path.dirname(__file__) model = TrainDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period,target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode,memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update, model_path=model_save_path,log_dir=logFilePath) 
model.compile_model(X_train,y_train,layers) model.q_net.summary() model.train(xval,yval) network = model.get_network() predictedytrain=network_predictions(network,np.array(xtrain)) predictedytest = network_predictions(network,np.array(xtest)) if "DDQN" == algorithm: start = time.time() modelName = "DDQN" model = TrainDDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period,target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode,memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update,log_dir=logFilePath) model.compile_model(X_train,y_train,layers) model.q_net.summary() model.train(xval,yval) network = model.get_network() predictedytrain=network_predictions(network,np.array(xtrain)) predictedytest = network_predictions(network,np.array(xtest)) score = objClf.get_score(self.scoreParam,ytest,predictedytest) score = round(score,2) return (network,self.rl_config,score,algorithm,-1,-1,-1) except Exception as inst: self.log.info( '\n-----> RL Failed!!!.'+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
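TrainRL above indexes self.rl_config with a fixed set of keys; a minimal illustrative config (the values are placeholders, not tuned defaults):

rl_config = {
    'episodes': 100,                 # total number of training episodes
    'warmup_steps': 1000,            # random-policy steps used to seed the replay memory
    'batch_size': 32,
    'collect_steps_per_episode': 50,
    'collect_every': 1,
    'target_update_period': 10,      # period to overwrite the target Q-network
    'target_update_tau': 1.0,        # soft-update factor for the target network
    'n_step_update': 1,
    'learning_rate': 1e-3,
    'gamma': 0.99,                   # discount factor
    'min_epsilon': 0.01,             # final chance of choosing a random action
}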
DRL_predict.py
import os import numpy as np import pandas as pd import time from DeepRL.agents.ddqn import TrainDDQN from DeepRL.agents.dqn import TrainDQN from DeepRL.dataprocess import get_train_test_val from DeepRL.utils import rounded_dict from tensorflow.keras.layers import Dense, Dropout from sklearn.model_selection import train_test_split os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # CPU is faster than GPU on structured data def PredictRL(input_csv_file, model_load_path, RL_hparams_config_file, RL_Algo_Name): if not (os.path.exists(model_load_path)): os.makedirs(model_load_path) episodes = RL_hparams_config_file['DeepRL']['episodes'] # Total number of episodes warmup_steps = RL_hparams_config_file['DeepRL']['warmup_steps'] # Amount of warmup steps to collect data with random policy memory_length = warmup_steps # Max length of the Replay Memory batch_size = RL_hparams_config_file['DeepRL']['batch_size'] collect_steps_per_episode = RL_hparams_config_file['DeepRL']['collect_steps_per_episode'] collect_every = RL_hparams_config_file['DeepRL']['collect_every'] target_update_period = RL_hparams_config_file['DeepRL']['target_update_period'] # Period to overwrite the target Q-network with the default Q-network target_update_tau = RL_hparams_config_file['DeepRL']['target_update_tau'] # Soften the target model update n_step_update = RL_hparams_config_file['DeepRL']['n_step_update'] learning_rate = RL_hparams_config_file['DeepRL']['learning_rate'] # Learning rate gamma = RL_hparams_config_file['DeepRL']['gamma'] # Discount factor min_epsilon = RL_hparams_config_file['DeepRL']['min_epsilon'] # Minimal and final chance of choosing random action decay_episodes = episodes // 10 # Number of episodes to decay from 1.0 to `min_epsilon`` #path = '/home/renith/Renith/Project/AION/Reinforcement/RL_Classification/Code/rl_text_classification/telemetry_data.csv' data = pd.read_csv(input_csv_file) device5 = data[data['device_id'] == "Device_1"] device5 = device5.drop(['device_id'], axis = 1) device5.reset_index(drop=True, inplace=True) target_value = [] for i in range(device5['device_status'].shape[0]): if(device5['device_status'][i] == "NORMAL"): target_value.append(0.0) else: target_value.append(1.0) device5['target'] = target_value device5 = device5.drop(['device_status'], axis = 1) X_test = device5.iloc[:,1:-1] y_test = device5.iloc[:,-1] X_test = X_test.astype(np.float32) y_test = y_test.astype(np.int32) #Normalization mini, maxi = X_test.min(axis=0), X_test.max(axis=0) X_test -= mini X_test /= maxi - mini min_class = [1] #Minority class maj_class = [0] #Majority class #X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.8, stratify=y_train) #X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, stratify=y_train) #X_train = np.array(X_train) #y_train = np.array(y_train) #X_val = np.array(X_val) #y_val = np.array(y_val) X_test = np.array(X_test) y_test = np.array(y_test) #X_train, y_train, X_test, y_test, X_val, y_val = get_train_test_val(X_train.values, y_train.values, X_test.values, y_test.values, # min_class, maj_class, val_frac=0.2) layers = [Dense(128, activation="relu"), Dense(64, activation="relu"), Dense(32, activation="relu"), Dense(2, activation=None)] if(RL_Algo_Name == "DDQN"): model = TrainDDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period, target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode, memory_length=memory_length, 
collect_every=collect_every, n_step_update=n_step_update, model_path=model_load_path) elif(RL_Algo_Name == "DQN"): model = TrainDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period, target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode, memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update, model_path=model_load_path) model.compile_model(X_test, y_test, layers) model.q_net.summary() #model.train(X_val, y_val, "F1") #print("Training Ended !!!!") stats = model.evaluate(X_test, y_test) print(rounded_dict(stats)) #stats = model.evaluate(X_train, y_train) #print(rounded_dict(stats))
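PredictRL above expects the same hyperparameters nested under a 'DeepRL' key, plus a CSV containing the hardcoded device_id and device_status columns; an illustrative call (paths and values are placeholders):

config = {'DeepRL': {'episodes': 100, 'warmup_steps': 1000, 'batch_size': 32,
                     'collect_steps_per_episode': 50, 'collect_every': 1,
                     'target_update_period': 10, 'target_update_tau': 1.0,
                     'n_step_update': 1, 'learning_rate': 1e-3,
                     'gamma': 0.99, 'min_epsilon': 0.01}}
PredictRL('telemetry_data.csv', './saved_model', config, 'DDQN')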
metrics.py
import matplotlib.pyplot as plt import numpy as np import seaborn as sns from sklearn.metrics import (auc, average_precision_score, confusion_matrix, f1_score, precision_recall_curve, roc_curve,precision_score,recall_score) from tensorflow import constant from tf_agents.trajectories import time_step def network_predictions(network, X: np.ndarray) -> dict: """Computes y_pred using a given network. Input is array of data entries. :param network: The network to use to calculate metrics :type network: (Q)Network :param X: X data, input to network :type X: np.ndarray :return: Numpy array of predicted targets for given X :rtype: np.ndarray """ if not isinstance(X, np.ndarray): raise ValueError(f"`X` must be of type `np.ndarray` not {type(X)}") q, _ = network(X, step_type=constant([time_step.StepType.FIRST] * X.shape[0]), training=False) return np.argmax(q.numpy(), axis=1) # Max action for each x in X def decision_function(network, X: np.ndarray) -> dict: """Computes the score for the predicted class of each x in X using a given network. Input is array of data entries. :param network: The network to use to calculate the score per x in X :type network: (Q)Network :param X: X data, input to network :type X: np.ndarray :return: Numpy array of scores for given X :rtype: np.ndarray """ if not isinstance(X, np.ndarray): raise ValueError(f"`X` must be of type `np.ndarray` not {type(X)}") q, _ = network(X, step_type=constant([time_step.StepType.FIRST] * X.shape[0]), training=False) return np.max(q.numpy(), axis=1) # Value of max action for each x in X def classification_metrics(y_true: list, y_pred: list) -> dict: """Computes metrics using y_true and y_pred. :param y_true: True labels :type y_true: np.ndarray :param y_pred: Predicted labels, corresponding to y_true :type y_pred: np.ndarray :return: Dictionairy containing Geometric Mean, F1, Precision, Recall, TP, TN, FP, FN :rtype: dict """ if not isinstance(y_true, (list, tuple, np.ndarray)): raise ValueError(f"`y_true` must be of type `list` not {type(y_true)}") if not isinstance(y_pred, (list, tuple, np.ndarray)): raise ValueError(f"`y_pred` must be of type `list` not {type(y_pred)}") if len(y_true) != len(y_pred): raise ValueError("`X` and `y` must be of same length.") #G_mean = np.sqrt(recall * specificity) # Geometric mean of recall and specificity F1 = f1_score(y_true, y_pred, average='macro') # Default F-measure recall = recall_score(y_true,y_pred,average='macro') precision = precision_score(y_true,y_pred,average='macro') return {"F1": F1, "Precision": precision, "Recall": recall} def plot_pr_curve(network, X_test: np.ndarray, y_test: np.ndarray, X_train: np.ndarray = None, y_train: np.ndarray = None) -> None: # pragma: no cover """Plots PR curve of X_test and y_test of given network. Optionally plots PR curve of X_train and y_train. Average precision is shown in the legend. 
:param network: The network to use to calculate the PR curve :type network: (Q)Network :param X_test: X data, input to network :type X_test: np.ndarray :param y_test: True labels for `X_test` :type y_test: np.ndarray :param X_train: Optional X data to plot validation PR curve :type X_train: np.ndarray :param y_train: True labels for `X_val` :type y_train: np.ndarray :return: None :rtype: NoneType """ plt.plot((0, 1), (1, 0), color="black", linestyle="--", label="Baseline") # TODO: Consider changing baseline if X_train is not None and y_train is not None: y_val_score = decision_function(network, X_train) val_precision, val_recall, _ = precision_recall_curve(y_train, y_val_score) val_AP = average_precision_score(y_train, y_val_score) plt.plot(val_recall, val_precision, label=f"Train AP: {val_AP:.3f}") y_test_score = decision_function(network, X_test) test_precision, test_recall, _ = precision_recall_curve(y_test, y_test_score) test_AP = average_precision_score(y_test, y_test_score) plt.plot(test_recall, test_precision, label=f"Test AP: {test_AP:.3f}") plt.xlim((-0.05, 1.05)) plt.ylim((-0.05, 1.05)) plt.xlabel("Recall") plt.ylabel("Precision") plt.title("PR Curve") plt.gca().set_aspect("equal", adjustable="box") plt.legend(loc="lower left") plt.grid(True) plt.show() def plot_roc_curve(network, X_test: np.ndarray, y_test: np.ndarray, X_train: np.ndarray = None, y_train: np.ndarray = None) -> None: # pragma: no cover """Plots ROC curve of X_test and y_test of given network. Optionally plots ROC curve of X_train and y_train. Average precision is shown in the legend. :param network: The network to use to calculate the PR curve :type network: (Q)Network :param X_test: X data, input to network :type X_test: np.ndarray :param y_test: True labels for `X_test` :type y_test: np.ndarray :param X_train: Optional X data to plot validation PR curve :type X_train: np.ndarray :param y_train: True labels for `X_val` :type y_train: np.ndarray :return: None :rtype: NoneType """ plt.plot((0, 1), (0, 1), color="black", linestyle="--", label="Baseline") # TODO: Consider changing baseline if X_train is not None and y_train is not None: y_train_score = decision_function(network, X_train) fpr_train, tpr_train, _ = roc_curve(y_train, y_train_score) plt.plot(fpr_train, tpr_train, label=f"Train AUROC: {auc(fpr_train, tpr_train):.2f}") y_test_score = decision_function(network, X_test) fpr_test, tpr_test, _ = roc_curve(y_test, y_test_score) plt.plot(fpr_test, tpr_test, label=f"Test AUROC: {auc(fpr_test, tpr_test):.2f}") plt.xlim((-0.05, 1.05)) plt.ylim((-0.05, 1.05)) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.title("ROC Curve") plt.gca().set_aspect("equal", adjustable="box") plt.legend(loc="lower right") plt.grid(True) plt.show() def plot_confusion_matrix(TP: int, FN: int, FP: int, TN: int) -> None: # pragma: no cover """Plots confusion matric of given TP, FN, FP, TN. :param TP: True Positive :type TP: int :param FN: False Negative :type FN: int :param FP: False Positive :type FP: int :param TN: True Negative :type TN: int :return: None :rtype: NoneType """ if not all(isinstance(i, (int, np.integer)) for i in (TP, FN, FP, TN)): raise ValueError("Not all arguments are integers.") ticklabels = ("Minority", "Majority") sns.heatmap(((TP, FN), (FP, TN)), annot=True, fmt="_d", cmap="viridis", xticklabels=ticklabels, yticklabels=ticklabels) plt.title("Confusion matrix") plt.xlabel("Predicted labels") plt.ylabel("True labels") plt.show()
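A quick usage sketch for classification_metrics above; on this toy input each macro-averaged metric works out to 2/3:

y_true = [0, 0, 1, 1, 1, 0]
y_pred = [0, 1, 1, 1, 0, 0]
print(classification_metrics(y_true, y_pred))
# {'F1': 0.666..., 'Precision': 0.666..., 'Recall': 0.666...}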
utils.py
import os
from typing import List

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

def split_csv(fp: str = "./data/creditcard.csv", fp_dest: str = "./data",
              name: str = "credit", test_size: float = 0.5, strat_col: str = "Class") -> None:
    """Splits a csv file in two, in a stratified fashion.
    Format for filenames will be `{name}0.csv` and `{name}1.csv`.

    :param fp: The path at which the csv file is located.
    :type  fp: str
    :param fp_dest: The path to save the train and test files.
    :type  fp_dest: str
    :param name: The prefix for the files.
    :type  name: str
    :param test_size: The fraction of total size for the test file.
    :type  test_size: float
    :param strat_col: The column in the original csv file to stratify.
    :type  strat_col: str

    :return: None, two files located at `fp_dest`.
    :rtype: NoneType
    """
    if not os.path.isfile(fp):
        raise FileNotFoundError(f"File at {fp} does not exist.")
    if not os.path.isdir(fp_dest):
        raise ValueError(f"Directory at {fp_dest} does not exist.")
    if not 0 < test_size < 1:
        raise ValueError(f"{test_size} is not in interval 0 < x < 1.")

    df = pd.read_csv(fp)
    if strat_col not in df.columns:
        raise ValueError(f"Stratify column {strat_col} not found in DataFrame.")

    train, test = train_test_split(df, test_size=test_size, stratify=df[strat_col])
    train.to_csv(f"{fp_dest}/{name}0.csv", index=False)
    test.to_csv(f"{fp_dest}/{name}1.csv", index=False)

def rounded_dict(d: dict, precision: int = 6) -> dict:
    """Rounds all values in a dictionary to `precision` digits after the decimal point.

    :param d: Dictionary containing only floats or ints as values
    :type  d: dict

    :return: Rounded dictionary
    :rtype: dict
    """
    return {k: round(v, precision) for k, v in d.items()}

def imbalance_ratio(y: np.ndarray, min_classes: List[int] = [1], maj_classes: List[int] = [0]) -> float:
    """Calculates the imbalance ratio of minority class(es) to majority class(es).

    :param y: y-vector with labels.
    :type  y: np.ndarray
    :param min_classes: The labels of the minority classes
    :type  min_classes: list
    :param maj_classes: The labels of the majority classes
    :type  maj_classes: list

    :return: The imbalance ratio
    :rtype: float
    """
    return np.isin(y, min_classes).sum() / np.isin(y, maj_classes).sum()
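Quick usage sketches for the helpers above:

import numpy as np

print(rounded_dict({'F1': 0.1234567, 'Recall': 0.7654321}, precision=3))
# {'F1': 0.123, 'Recall': 0.765}

y = np.array([0, 0, 0, 0, 1])
print(imbalance_ratio(y))  # 0.25: one minority sample per four majority samples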
dataprocess.py
import os
from typing import List, Tuple

import numpy as np
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from tensorflow.keras.datasets import cifar10, fashion_mnist, imdb, mnist
from tensorflow.keras.preprocessing.sequence import pad_sequences

from reinforcement.utils import imbalance_ratio  # Needed by `get_train_test_val`; imported as in dqn.py/ddqn.py

TrainTestData = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
TrainTestValData = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]


def load_image(data_source: str) -> TrainTestData:
    """
    Loads one of the following image datasets: {mnist, famnist, cifar10}.
    Normalizes the data. Returns X and y for both train and test datasets.
    Dtypes of X's and y's will be `float32` and `int32` to be compatible with `tf_agents`.

    :param data_source: Either mnist, famnist or cifar10
    :type data_source: str

    :return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test
    :rtype: tuple
    """
    reshape_shape = -1, 28, 28, 1

    if data_source == "mnist":
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
    elif data_source == "famnist":
        (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
    elif data_source == "cifar10":
        (X_train, y_train), (X_test, y_test) = cifar10.load_data()
        reshape_shape = -1, 32, 32, 3
    else:
        raise ValueError("No valid `data_source`.")

    X_train = X_train.reshape(reshape_shape).astype(np.float32)  # Float32 is the expected dtype for the observation spec in the env
    X_test = X_test.reshape(reshape_shape).astype(np.float32)

    X_train /= 255  # /= is not available when casting int to float: https://stackoverflow.com/a/48948461/10603874
    X_test /= 255

    y_train = y_train.reshape(y_train.shape[0], ).astype(np.int32)
    y_test = y_test.reshape(y_test.shape[0], ).astype(np.int32)

    return X_train, y_train, X_test, y_test


def load_csv(fp_train: str, fp_test: str, label_col: str, drop_cols: List[str], normalization: bool = False) -> TrainTestData:
    """
    Loads any csv-file from local filepaths. Returns X and y for both train and test datasets.
    Option to normalize the data with min-max normalization.
    Only csv-files with float32 values for the features and int32 values for the labels are supported.
    Source for dataset: https://mimic-iv.mit.edu/

    :param fp_train: Location of the train csv-file
    :type fp_train: str
    :param fp_test: Location of the test csv-file
    :type fp_test: str
    :param label_col: The name of the column containing the labels of the data
    :type label_col: str
    :param drop_cols: List of the names of the columns to be dropped. `label_col` gets dropped automatically
    :type drop_cols: List of strings
    :param normalization: Normalize the data with min-max normalization?
    :type normalization: bool

    :return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test
    :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
    """
    if not os.path.isfile(fp_train):
        raise FileNotFoundError(f"`fp_train` {fp_train} does not exist.")
    if not os.path.isfile(fp_test):
        raise FileNotFoundError(f"`fp_test` {fp_test} does not exist.")
    if not isinstance(normalization, bool):
        raise TypeError(f"`normalization` must be of type `bool`, not {type(normalization)}")

    X_train = read_csv(fp_train).astype(np.float32)  # DataFrames directly converted to float32
    X_test = read_csv(fp_test).astype(np.float32)

    y_train = X_train[label_col].astype(np.int32)
    y_test = X_test[label_col].astype(np.int32)

    X_train.drop(columns=drop_cols + [label_col], inplace=True)  # Dropping cols and label column
    X_test.drop(columns=drop_cols + [label_col], inplace=True)

    # Other data sources are already normalized. RGB values are always in range 0 to 255.
    if normalization:
        mini, maxi = X_train.min(axis=0), X_train.max(axis=0)
        X_train -= mini
        X_train /= maxi - mini
        X_test -= mini
        X_test /= maxi - mini

    return X_train.values, y_train.values, X_test.values, y_test.values  # Numpy arrays


def load_imdb(config: Tuple[int, int] = (5_000, 500)) -> TrainTestData:
    """Loads the IMDB dataset. Returns X and y for both train and test datasets.

    :param config: Tuple of the number of most frequent words and the max length of each sequence.
    :type config: tuple

    :return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test
    :rtype: tuple
    """
    if not isinstance(config, (tuple, list)):
        raise TypeError(f"{type(config)} is no valid datatype for `config`.")
    if len(config) != 2:
        raise ValueError("Tuple length of `config` must be 2.")
    if not all(i > 0 for i in config):
        raise ValueError("All integers of `config` must be > 0.")

    (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=config[0])

    X_train = pad_sequences(X_train, maxlen=config[1])
    X_test = pad_sequences(X_test, maxlen=config[1])

    y_train = y_train.astype(np.int32)
    y_test = y_test.astype(np.int32)

    return X_train, y_train, X_test, y_test


def get_train_test_val(X_train: np.ndarray, y_train: np.ndarray, X_test: np.ndarray, y_test: np.ndarray,
                       min_classes: List[int], maj_classes: List[int], imb_ratio: float = None,
                       imb_test: bool = True, val_frac: float = 0.25, print_stats: bool = True) -> TrainTestValData:
    """
    Imbalances the data and divides it into train, test and validation sets.
    The imbalance ratio of each individual dataset is approximately the same as the given `imb_ratio`.

    :param X_train: The X_train data
    :type X_train: np.ndarray
    :param y_train: The y_train data
    :type y_train: np.ndarray
    :param X_test: The X_test data
    :type X_test: np.ndarray
    :param y_test: The y_test data
    :type y_test: np.ndarray
    :param min_classes: List of labels of all minority classes
    :type min_classes: list
    :param maj_classes: List of labels of all majority classes.
    :type maj_classes: list
    :param imb_ratio: Imbalance ratio for minority to majority class: len(minority datapoints) / len(majority datapoints)
        If `imb_ratio` is None, data will not be imbalanced and will only be relabeled to 1's and 0's.
    :type imb_ratio: float
    :param imb_test: Imbalance the test dataset?
    :type imb_test: bool
    :param val_frac: Fraction to take from X_train and y_train for X_val and y_val
    :type val_frac: float
    :param print_stats: Print the imbalance ratio of the imbalanced data?
    :type print_stats: bool

    :return: Tuple of (X_train, y_train, X_test, y_test, X_val, y_val)
    :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]
    """
    if not 0 < val_frac < 1:
        raise ValueError(f"{val_frac} is not in interval 0 < x < 1.")
    if not isinstance(print_stats, bool):
        raise TypeError(f"`print_stats` must be of type `bool`, not {type(print_stats)}.")

    X_train, y_train = imbalance_data(X_train, y_train, min_classes, maj_classes, imb_ratio=imb_ratio)
    # Only imbalance test-data if imb_test is True
    X_test, y_test = imbalance_data(X_test, y_test, min_classes, maj_classes, imb_ratio=imb_ratio if imb_test else None)

    # stratify=y_train to ensure class balance is kept between train and validation datasets
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=val_frac, stratify=y_train)

    if print_stats:
        p_train, p_test, p_val = [((y == 1).sum(), imbalance_ratio(y)) for y in (y_train, y_test, y_val)]
        print(f"Imbalance ratio `p`:\n"
              f"\ttrain:      n={p_train[0]}, p={p_train[1]:.6f}\n"
              f"\ttest:       n={p_test[0]}, p={p_test[1]:.6f}\n"
              f"\tvalidation: n={p_val[0]}, p={p_val[1]:.6f}")

    return X_train, y_train, X_test, y_test, X_val, y_val


def imbalance_data(X: np.ndarray, y: np.ndarray, min_class: List[int], maj_class: List[int],
                   imb_ratio: float = None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Splits the data into minority and majority; only values in {min_class, maj_class} will be kept.
    (Possibly) decreases the minority rows to match the given imbalance ratio.
    If the initial imb_ratio of the dataset is lower than the given `imb_ratio`, the imb_ratio of the returned data will not be changed.
    If `imb_ratio` is None, data will not be imbalanced and will only be relabeled to 1's and 0's.
    """
    if not isinstance(X, np.ndarray):
        raise TypeError(f"`X` must be of type `np.ndarray` not {type(X)}")
    if not isinstance(y, np.ndarray):
        raise TypeError(f"`y` must be of type `np.ndarray` not {type(y)}")
    if X.shape[0] != y.shape[0]:
        raise ValueError("`X` and `y` must contain the same amount of rows.")
    if not isinstance(min_class, (list, tuple)):
        raise TypeError("`min_class` must be of type list or tuple.")
    if not isinstance(maj_class, (list, tuple)):
        raise TypeError("`maj_class` must be of type list or tuple.")
    if (imb_ratio is not None) and not (0 < imb_ratio < 1):
        raise ValueError(f"{imb_ratio} is not in interval 0 < imb_ratio < 1.")

    if imb_ratio is None:  # Do not imbalance data if no `imb_ratio` is given
        imb_ratio = 1

    X_min = X[np.isin(y, min_class)]  # Mask the correct indexes
    X_maj = X[np.isin(y, maj_class)]  # Only keep data/labels for x in {min_class, maj_class} and discard all others

    min_len = int(X_maj.shape[0] * imb_ratio)  # Amount of rows to select from minority classes to get to correct imbalance ratio

    # Keep all majority rows, decrease minority rows to match `imb_ratio`
    X_min = X_min[np.random.choice(X_min.shape[0], min(min_len, X_min.shape[0]), replace=False), :]

    X_imb = np.concatenate([X_maj, X_min]).astype(np.float32)
    y_imb = np.concatenate((np.zeros(X_maj.shape[0]), np.ones(X_min.shape[0]))).astype(np.int32)
    X_imb, y_imb = shuffle(X_imb, y_imb)

    return X_imb, y_imb
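
# --- Usage sketch (illustrative, not part of the original module) ---
# End-to-end example of the intended pipeline: load MNIST, then imbalance it so
# class 2 becomes the minority (ratio 0.01) against all other digits. The choice
# of class 2 and the ratio are arbitrary examples.
if __name__ == "__main__":
    X_train, y_train, X_test, y_test = load_image("mnist")
    X_train, y_train, X_test, y_test, X_val, y_val = get_train_test_val(
        X_train, y_train, X_test, y_test,
        min_classes=[2], maj_classes=[0, 1, 3, 4, 5, 6, 7, 8, 9],
        imb_ratio=0.01, val_frac=0.25, print_stats=True)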
dqn.py
import os
import pickle
from datetime import datetime

import numpy as np
import tensorflow as tf
from reinforcement.environments.classifierenv import ClassifierEnv
from reinforcement.metrics import (classification_metrics, decision_function,
                                   network_predictions, plot_pr_curve, plot_roc_curve)
from reinforcement.utils import imbalance_ratio
from tensorflow import data
from tensorflow.keras.optimizers import Adam
from tf_agents.agents import DqnAgent
from tf_agents.drivers.dynamic_step_driver import DynamicStepDriver
from tf_agents.environments.tf_py_environment import TFPyEnvironment
from tf_agents.networks.sequential import Sequential
from tf_agents.policies.random_tf_policy import RandomTFPolicy
from tf_agents.replay_buffers.tf_uniform_replay_buffer import TFUniformReplayBuffer
from tf_agents.utils import common


class TrainDQN():
    """Wrapper for DQN training, validation, saving etc."""

    def __init__(self, episodes: int, warmup_steps: int, learning_rate: float, gamma: float,
                 min_epsilon: float, decay_episodes: int, model_path: str = None, log_dir: str = None,
                 batch_size: int = 64, memory_length: int = None, collect_steps_per_episode: int = 1,
                 val_every: int = None, target_update_period: int = 1, target_update_tau: float = 1.0,
                 progressbar: bool = True, n_step_update: int = 1, gradient_clipping: float = 1.0,
                 collect_every: int = 1) -> None:
        """
        Wrapper to make training easier.
        Code is partly based on https://www.tensorflow.org/agents/tutorials/1_dqn_tutorial

        :param episodes: Number of training episodes
        :type episodes: int
        :param warmup_steps: Number of steps to fill Replay Buffer with random state-action pairs before training starts
        :type warmup_steps: int
        :param learning_rate: Learning Rate for the Adam Optimizer
        :type learning_rate: float
        :param gamma: Discount factor for the Q-values
        :type gamma: float
        :param min_epsilon: Lowest and final value for epsilon
        :type min_epsilon: float
        :param decay_episodes: Amount of episodes to decay from 1 to `min_epsilon`
        :type decay_episodes: int
        :param model_path: Location to save the trained model
        :type model_path: str
        :param log_dir: Location to save the logs, useful for TensorBoard
        :type log_dir: str
        :param batch_size: Number of samples in minibatch to train on each step
        :type batch_size: int
        :param memory_length: Maximum size of the Replay Buffer
        :type memory_length: int
        :param collect_steps_per_episode: Amount of data to collect for Replay Buffer each episode
        :type collect_steps_per_episode: int
        :param collect_every: Step interval to collect data during training
        :type collect_every: int
        :param val_every: Validate the model every X episodes using the `collect_metrics()` function
        :type val_every: int
        :param target_update_period: Update the target Q-network every X episodes
        :type target_update_period: int
        :param target_update_tau: Parameter for softening the `target_update_period`
        :type target_update_tau: float
        :param progressbar: Enable or disable the progressbar for collecting data and training
        :type progressbar: bool

        :return: None
        :rtype: NoneType
        """
        self.episodes = episodes  # Total episodes
        self.warmup_steps = warmup_steps  # Amount of warmup steps before training
        self.batch_size = batch_size  # Batch size of Replay Memory
        self.collect_steps_per_episode = collect_steps_per_episode  # Amount of steps to collect data each episode
        self.collect_every = collect_every  # Step interval to collect data during training
        self.learning_rate = learning_rate  # Learning Rate
        self.gamma = gamma  # Discount factor
        self.min_epsilon = min_epsilon  # Minimal chance of choosing random action
        self.decay_episodes = decay_episodes  # Number of episodes to decay from 1.0 to `min_epsilon`
        self.target_update_period = target_update_period  # Period for soft updates
        self.target_update_tau = target_update_tau
        self.progressbar = progressbar  # Enable or disable the progressbar for collecting data and training
        self.n_step_update = n_step_update
        self.gradient_clipping = gradient_clipping  # Clip the loss
        self.compiled = False
        NOW = "DQN"  # datetime.now().strftime("%Y%m%d_%H%M%S")

        if memory_length is not None:
            self.memory_length = memory_length  # Max Replay Memory length
        else:
            self.memory_length = warmup_steps

        if val_every is not None:
            self.val_every = val_every  # Validate the policy every `val_every` episodes
        else:
            self.val_every = self.episodes // min(50, self.episodes)  # Can't validate the model 50 times if self.episodes < 50

        if model_path is not None:
            self.model_path = model_path + "/" + NOW + ".pkl"
        else:
            self.model_path = "./models/" + NOW + ".pkl"

        if log_dir is None:
            log_dir = "./logs/" + NOW
        self.writer = tf.summary.create_file_writer(log_dir)

    def compile_model(self, X_train, y_train, layers: list = [], imb_ratio: float = None,
                      loss_fn=common.element_wise_squared_loss) -> None:
        """Initializes the neural networks, DQN-agent, collect policies and replay buffer.

        :param X_train: Training data for the model.
        :type X_train: np.ndarray
        :param y_train: Labels corresponding to `X_train`. 1 for the positive class, 0 for the negative class.
        :type y_train: np.ndarray
        :param layers: List of layers to feed into the TF-agents custom Sequential(!) layer.
        :type layers: list
        :param imb_ratio: The imbalance ratio of the data.
        :type imb_ratio: float
        :param loss_fn: Callable loss function
        :type loss_fn: tf.compat.v1.losses

        :return: None
        :rtype: NoneType
        """
        if imb_ratio is None:
            imb_ratio = imbalance_ratio(y_train)

        self.train_env = TFPyEnvironment(ClassifierEnv(X_train, y_train, imb_ratio))
        self.global_episode = tf.Variable(0, name="global_episode", dtype=np.int64, trainable=False)  # Global train episode counter

        # Custom epsilon decay: https://github.com/tensorflow/agents/issues/339
        epsilon_decay = tf.compat.v1.train.polynomial_decay(
            1.0, self.global_episode, self.decay_episodes, end_learning_rate=self.min_epsilon)

        self.q_net = Sequential(layers, self.train_env.observation_spec())

        self.agent = DqnAgent(self.train_env.time_step_spec(),
                              self.train_env.action_spec(),
                              q_network=self.q_net,
                              optimizer=Adam(learning_rate=self.learning_rate),
                              td_errors_loss_fn=loss_fn,
                              train_step_counter=self.global_episode,
                              target_update_period=self.target_update_period,
                              target_update_tau=self.target_update_tau,
                              gamma=self.gamma,
                              epsilon_greedy=epsilon_decay,
                              n_step_update=self.n_step_update,
                              gradient_clipping=self.gradient_clipping)
        self.agent.initialize()

        self.random_policy = RandomTFPolicy(self.train_env.time_step_spec(), self.train_env.action_spec())
        self.replay_buffer = TFUniformReplayBuffer(data_spec=self.agent.collect_data_spec,
                                                   batch_size=self.train_env.batch_size,
                                                   max_length=self.memory_length)

        self.warmup_driver = DynamicStepDriver(self.train_env,
                                               self.random_policy,
                                               observers=[self.replay_buffer.add_batch],
                                               num_steps=self.warmup_steps)  # Uses a random policy

        self.collect_driver = DynamicStepDriver(self.train_env,
                                                self.agent.collect_policy,
                                                observers=[self.replay_buffer.add_batch],
                                                num_steps=self.collect_steps_per_episode)  # Uses the epsilon-greedy policy of the agent

        self.agent.train = common.function(self.agent.train)  # Optimization
        self.warmup_driver.run = common.function(self.warmup_driver.run)
        self.collect_driver.run = common.function(self.collect_driver.run)

        self.compiled = True

    def train(self, *args) -> None:
        """Starts the training of the model. Includes warmup period, metrics collection and model saving.

        :param *args: All arguments will be passed to `collect_metrics()`.
            This can be useful to pass callables, testing environments or validation data.
            Overwrite the TrainDQN.collect_metrics() function to use your own *args.
        :type *args: Any

        :return: None
        :rtype: NoneType, last step is saving the model as a side-effect
        """
        assert self.compiled, "Model must be compiled with model.compile_model(X_train, y_train, layers) before training."

        # Warmup period, fill memory with random actions
        if self.progressbar:
            print(f"\033[92mCollecting data for {self.warmup_steps:_} steps... This might take a few minutes...\033[0m")

        self.warmup_driver.run(time_step=None, policy_state=self.random_policy.get_initial_state(self.train_env.batch_size))

        if self.progressbar:
            print(f"\033[92m{self.replay_buffer.num_frames():_} frames collected!\033[0m")

        dataset = self.replay_buffer.as_dataset(sample_batch_size=self.batch_size, num_steps=self.n_step_update + 1,
                                                num_parallel_calls=data.experimental.AUTOTUNE).prefetch(data.experimental.AUTOTUNE)
        iterator = iter(dataset)

        def _train():
            experiences, _ = next(iterator)
            return self.agent.train(experiences).loss
        _train = common.function(_train)  # Optimization

        ts = None
        policy_state = self.agent.collect_policy.get_initial_state(self.train_env.batch_size)
        self.collect_metrics(*args)  # Initial collection for step 0
        for _ in range(self.episodes):
            if not self.global_episode % self.collect_every:
                # Collect a few steps using collect_policy and save to `replay_buffer`
                if self.collect_steps_per_episode != 0:
                    ts, policy_state = self.collect_driver.run(time_step=ts, policy_state=policy_state)

            # Sample a batch of data from `replay_buffer` and update the agent's network
            train_loss = _train()

            if not self.global_episode % self.val_every:
                with self.writer.as_default():
                    tf.summary.scalar("train_loss", train_loss, step=self.global_episode)
                self.collect_metrics(*args)

    def collect_metrics(self, X_val: np.ndarray, y_val: np.ndarray, save_best: str = None):
        """Collects metrics using the trained Q-network.

        :param X_val: Features of validation data, same shape as X_train
        :type X_val: np.ndarray
        :param y_val: Labels of validation data, same shape as y_train
        :type y_val: np.ndarray
        :param save_best: Saving the best model of all validation runs based on given metric:
            Choose one of: {Gmean, F1, Precision, Recall, TP, TN, FP, FN}
            This improves stability since the model at the last episode is not guaranteed to be the best model.
        :type save_best: str
        """
        y_pred = network_predictions(self.agent._target_q_network, X_val)
        stats = classification_metrics(y_val, y_pred)
        avgQ = np.mean(decision_function(self.agent._target_q_network, X_val))  # Max action for each x in X

        if save_best is not None:
            if not hasattr(self, "best_score"):  # If no best model yet
                self.best_score = 0.0

            if stats.get(save_best) >= self.best_score:  # Overwrite best model
                self.save_network()  # Saving directly to avoid shallow copy without trained weights
                self.best_score = stats.get(save_best)

        with self.writer.as_default():
            tf.summary.scalar("AverageQ", avgQ, step=self.global_episode)  # Average Q-value for this epoch
            for k, v in stats.items():
                tf.summary.scalar(k, v, step=self.global_episode)

    def evaluate(self, X_test, y_test):
        """
        Final evaluation of the trained Q-network with X_test and y_test.

        :param X_test: Features of test data, same shape as X_train
        :type X_test: np.ndarray
        :param y_test: Labels of test data, same shape as y_train
        :type y_test: np.ndarray
        """
        y_pred = network_predictions(self.agent._target_q_network, X_test)
        return classification_metrics(y_test, y_pred)

    def save_network(self):
        """Saves the Q-network as pickle to `model_path`."""
        with open(self.model_path, "wb") as f:  # Save Q-network as pickle
            pickle.dump(self.agent._target_q_network, f)

    def get_network(self):
        """Returns the (trained) target Q-network of the agent.

        :returns: The target Q-network.
        :rtype: tf_agents.networks.sequential.Sequential
        """
        return self.agent._target_q_network
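
# --- Usage sketch (illustrative, not part of the original module) ---
# How TrainDQN is meant to be driven; the hyperparameters and the Dense layer
# sizes are placeholders, and X_*/y_* are assumed to come from
# reinforcement/dataprocess.py. Shown commented out because it needs real data.
#
# from tensorflow.keras.layers import Dense
# model = TrainDQN(episodes=25_000, warmup_steps=170_000, learning_rate=0.00025,
#                  gamma=0.1, min_epsilon=0.5, decay_episodes=2_500)
# model.compile_model(X_train, y_train, layers=[Dense(256, activation="relu"), Dense(2)])
# model.train(X_val, y_val, "F1")          # *args are forwarded to collect_metrics()
# print(model.evaluate(X_test, y_test))    # dict of classification metrics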
ddqn.py
import os
import pickle
from datetime import datetime

import numpy as np
import tensorflow as tf
from reinforcement.environments.classifierenv import ClassifierEnv
from reinforcement.metrics import (classification_metrics, decision_function,
                                   network_predictions, plot_pr_curve, plot_roc_curve)
from reinforcement.utils import imbalance_ratio
from tensorflow import data
from tensorflow.keras.optimizers import Adam
from tf_agents.agents.dqn.dqn_agent import DdqnAgent
from tf_agents.drivers.dynamic_step_driver import DynamicStepDriver
from tf_agents.environments.tf_py_environment import TFPyEnvironment
from tf_agents.networks.sequential import Sequential
from tf_agents.policies.random_tf_policy import RandomTFPolicy
from tf_agents.replay_buffers.tf_uniform_replay_buffer import TFUniformReplayBuffer
from tf_agents.utils import common


class TrainDDQN():
    """Wrapper for DDQN training, validation, saving etc."""

    def __init__(self, episodes: int, warmup_steps: int, learning_rate: float, gamma: float,
                 min_epsilon: float, decay_episodes: int, model_path: str = None, log_dir: str = None,
                 batch_size: int = 64, memory_length: int = None, collect_steps_per_episode: int = 1,
                 val_every: int = None, target_update_period: int = 1, target_update_tau: float = 1.0,
                 progressbar: bool = True, n_step_update: int = 1, gradient_clipping: float = 1.0,
                 collect_every: int = 1) -> None:
        """
        Wrapper to make training easier.
        Code is partly based on https://www.tensorflow.org/agents/tutorials/1_dqn_tutorial

        :param episodes: Number of training episodes
        :type episodes: int
        :param warmup_steps: Number of steps to fill Replay Buffer with random state-action pairs before training starts
        :type warmup_steps: int
        :param learning_rate: Learning Rate for the Adam Optimizer
        :type learning_rate: float
        :param gamma: Discount factor for the Q-values
        :type gamma: float
        :param min_epsilon: Lowest and final value for epsilon
        :type min_epsilon: float
        :param decay_episodes: Amount of episodes to decay from 1 to `min_epsilon`
        :type decay_episodes: int
        :param model_path: Location to save the trained model
        :type model_path: str
        :param log_dir: Location to save the logs, useful for TensorBoard
        :type log_dir: str
        :param batch_size: Number of samples in minibatch to train on each step
        :type batch_size: int
        :param memory_length: Maximum size of the Replay Buffer
        :type memory_length: int
        :param collect_steps_per_episode: Amount of data to collect for Replay Buffer each episode
        :type collect_steps_per_episode: int
        :param collect_every: Step interval to collect data during training
        :type collect_every: int
        :param val_every: Validate the model every X episodes using the `collect_metrics()` function
        :type val_every: int
        :param target_update_period: Update the target Q-network every X episodes
        :type target_update_period: int
        :param target_update_tau: Parameter for softening the `target_update_period`
        :type target_update_tau: float
        :param progressbar: Enable or disable the progressbar for collecting data and training
        :type progressbar: bool

        :return: None
        :rtype: NoneType
        """
        self.episodes = episodes  # Total episodes
        self.warmup_steps = warmup_steps  # Amount of warmup steps before training
        self.batch_size = batch_size  # Batch size of Replay Memory
        self.collect_steps_per_episode = collect_steps_per_episode  # Amount of steps to collect data each episode
        self.collect_every = collect_every  # Step interval to collect data during training
        self.learning_rate = learning_rate  # Learning Rate
        self.gamma = gamma  # Discount factor
        self.min_epsilon = min_epsilon  # Minimal chance of choosing random action
        self.decay_episodes = decay_episodes  # Number of episodes to decay from 1.0 to `min_epsilon`
        self.target_update_period = target_update_period  # Period for soft updates
        self.target_update_tau = target_update_tau
        self.progressbar = progressbar  # Enable or disable the progressbar for collecting data and training
        self.n_step_update = n_step_update
        self.gradient_clipping = gradient_clipping  # Clip the loss
        self.compiled = False
        NOW = "DDQN"  # datetime.now().strftime("%Y%m%d_%H%M%S")

        if memory_length is not None:
            self.memory_length = memory_length  # Max Replay Memory length
        else:
            self.memory_length = warmup_steps

        if val_every is not None:
            self.val_every = val_every  # Validate the policy every `val_every` episodes
        else:
            self.val_every = self.episodes // min(50, self.episodes)  # Can't validate the model 50 times if self.episodes < 50

        if model_path is not None:
            self.model_path = model_path + "/" + NOW + ".pkl"
        else:
            self.model_path = "./models/" + NOW + ".pkl"

        if log_dir is None:
            log_dir = "./logs/" + NOW
        self.writer = tf.summary.create_file_writer(log_dir)

    def compile_model(self, X_train, y_train, layers: list = [], imb_ratio: float = None,
                      loss_fn=common.element_wise_squared_loss) -> None:
        """Initializes the neural networks, DDQN-agent, collect policies and replay buffer.

        :param X_train: Training data for the model.
        :type X_train: np.ndarray
        :param y_train: Labels corresponding to `X_train`. 1 for the positive class, 0 for the negative class.
        :type y_train: np.ndarray
        :param layers: List of layers to feed into the TF-agents custom Sequential(!) layer.
        :type layers: list
        :param imb_ratio: The imbalance ratio of the data.
        :type imb_ratio: float
        :param loss_fn: Callable loss function
        :type loss_fn: tf.compat.v1.losses

        :return: None
        :rtype: NoneType
        """
        if imb_ratio is None:
            imb_ratio = imbalance_ratio(y_train)

        self.train_env = TFPyEnvironment(ClassifierEnv(X_train, y_train, imb_ratio))
        self.global_episode = tf.Variable(0, name="global_episode", dtype=np.int64, trainable=False)  # Global train episode counter

        # Custom epsilon decay: https://github.com/tensorflow/agents/issues/339
        epsilon_decay = tf.compat.v1.train.polynomial_decay(
            1.0, self.global_episode, self.decay_episodes, end_learning_rate=self.min_epsilon)

        self.q_net = Sequential(layers, self.train_env.observation_spec())

        self.agent = DdqnAgent(self.train_env.time_step_spec(),
                               self.train_env.action_spec(),
                               q_network=self.q_net,
                               optimizer=Adam(learning_rate=self.learning_rate),
                               td_errors_loss_fn=loss_fn,
                               train_step_counter=self.global_episode,
                               target_update_period=self.target_update_period,
                               target_update_tau=self.target_update_tau,
                               gamma=self.gamma,
                               epsilon_greedy=epsilon_decay,
                               n_step_update=self.n_step_update,
                               gradient_clipping=self.gradient_clipping)
        self.agent.initialize()

        self.random_policy = RandomTFPolicy(self.train_env.time_step_spec(), self.train_env.action_spec())
        self.replay_buffer = TFUniformReplayBuffer(data_spec=self.agent.collect_data_spec,
                                                   batch_size=self.train_env.batch_size,
                                                   max_length=self.memory_length)

        self.warmup_driver = DynamicStepDriver(self.train_env,
                                               self.random_policy,
                                               observers=[self.replay_buffer.add_batch],
                                               num_steps=self.warmup_steps)  # Uses a random policy

        self.collect_driver = DynamicStepDriver(self.train_env,
                                                self.agent.collect_policy,
                                                observers=[self.replay_buffer.add_batch],
                                                num_steps=self.collect_steps_per_episode)  # Uses the epsilon-greedy policy of the agent

        self.agent.train = common.function(self.agent.train)  # Optimization
        self.warmup_driver.run = common.function(self.warmup_driver.run)
        self.collect_driver.run = common.function(self.collect_driver.run)

        self.compiled = True

    def train(self, *args) -> None:
        """Starts the training of the model. Includes warmup period, metrics collection and model saving.

        :param *args: All arguments will be passed to `collect_metrics()`.
            This can be useful to pass callables, testing environments or validation data.
            Overwrite the TrainDDQN.collect_metrics() function to use your own *args.
        :type *args: Any

        :return: None
        :rtype: NoneType, last step is saving the model as a side-effect
        """
        assert self.compiled, "Model must be compiled with model.compile_model(X_train, y_train, layers) before training."

        # Warmup period, fill memory with random actions
        if self.progressbar:
            print(f"\033[92mCollecting data for {self.warmup_steps:_} steps... This might take a few minutes...\033[0m")

        self.warmup_driver.run(time_step=None, policy_state=self.random_policy.get_initial_state(self.train_env.batch_size))

        if self.progressbar:
            print(f"\033[92m{self.replay_buffer.num_frames():_} frames collected!\033[0m")

        dataset = self.replay_buffer.as_dataset(sample_batch_size=self.batch_size, num_steps=self.n_step_update + 1,
                                                num_parallel_calls=data.experimental.AUTOTUNE).prefetch(data.experimental.AUTOTUNE)
        iterator = iter(dataset)

        def _train():
            experiences, _ = next(iterator)
            return self.agent.train(experiences).loss
        _train = common.function(_train)  # Optimization

        ts = None
        policy_state = self.agent.collect_policy.get_initial_state(self.train_env.batch_size)
        self.collect_metrics(*args)  # Initial collection for step 0
        for _ in range(self.episodes):
            if not self.global_episode % self.collect_every:
                # Collect a few steps using collect_policy and save to `replay_buffer`
                if self.collect_steps_per_episode != 0:
                    ts, policy_state = self.collect_driver.run(time_step=ts, policy_state=policy_state)

            # Sample a batch of data from `replay_buffer` and update the agent's network
            train_loss = _train()

            if not self.global_episode % self.val_every:
                with self.writer.as_default():
                    tf.summary.scalar("train_loss", train_loss, step=self.global_episode)
                self.collect_metrics(*args)

    def collect_metrics(self, X_val: np.ndarray, y_val: np.ndarray, save_best: str = None):
        """Collects metrics using the trained Q-network.

        :param X_val: Features of validation data, same shape as X_train
        :type X_val: np.ndarray
        :param y_val: Labels of validation data, same shape as y_train
        :type y_val: np.ndarray
        :param save_best: Saving the best model of all validation runs based on given metric:
            Choose one of: {Gmean, F1, Precision, Recall, TP, TN, FP, FN}
            This improves stability since the model at the last episode is not guaranteed to be the best model.
        :type save_best: str
        """
        y_pred = network_predictions(self.agent._target_q_network, X_val)
        stats = classification_metrics(y_val, y_pred)
        avgQ = np.mean(decision_function(self.agent._target_q_network, X_val))  # Max action for each x in X

        if save_best is not None:
            if not hasattr(self, "best_score"):  # If no best model yet
                self.best_score = 0.0

            if stats.get(save_best) >= self.best_score:  # Overwrite best model
                self.save_network(self.model_path)  # Saving directly to avoid shallow copy without trained weights
                self.best_score = stats.get(save_best)

        with self.writer.as_default():
            tf.summary.scalar("AverageQ", avgQ, step=self.global_episode)  # Average Q-value for this epoch
            for k, v in stats.items():
                tf.summary.scalar(k, v, step=self.global_episode)

    def evaluate(self, X_train, y_train, X_test, y_test):
        """
        Final evaluation of the trained Q-network with X_test and y_test.
        X_train and y_train are accepted for an optional PR and ROC curve comparison
        (to check for overfitting) but are currently unused.

        :param X_test: Features of test data, same shape as X_train
        :type X_test: np.ndarray
        :param y_test: Labels of test data, same shape as y_train
        :type y_test: np.ndarray
        :param X_train: Features of train data
        :type X_train: np.ndarray
        :param y_train: Labels of train data
        :type y_train: np.ndarray
        """
        y_pred = network_predictions(self.agent._target_q_network, X_test)
        return classification_metrics(y_test, y_pred)

    def get_network(self):
        """Returns the (trained) target Q-network of the agent."""
        return self.agent._target_q_network

    def save_network(self, filename_rl):
        """Saves the Q-network as pickle to `filename_rl`.

        :param filename_rl: Filepath to save the pickle of the network to
        :type filename_rl: str
        """
        with open(filename_rl, "wb") as f:  # Save Q-network as pickle
            pickle.dump(self.agent._target_q_network, f)

    @staticmethod
    def load_network(fp: str):
        """Static method to load Q-network pickle from given filepath.

        :param fp: Filepath to the saved pickle of the network
        :type fp: str

        :returns: The network-object loaded from a pickle file.
        :rtype: tensorflow.keras.models.Model
        """
        with open(fp, "rb") as f:  # Load the Q-network
            network = pickle.load(f)
        return network
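
# --- Usage sketch (illustrative, not part of the original module) ---
# TrainDDQN mirrors TrainDQN but uses DdqnAgent; the main extra is the static
# `load_network`, so a pickled Q-network can be restored without an instance.
# The path below is just the default produced by `model_path=None`.
#
# network = TrainDDQN.load_network("./models/DDQN.pkl")
# y_pred = network_predictions(network, X_test)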
classifierenv.py
import numpy as np
from tf_agents.environments.py_environment import PyEnvironment
from tf_agents.specs.array_spec import ArraySpec, BoundedArraySpec
from tf_agents.trajectories import time_step as ts


class ClassifierEnv(PyEnvironment):
    """
    Custom `PyEnvironment` environment for imbalanced classification.
    Based on https://www.tensorflow.org/agents/tutorials/2_environments_tutorial
    """

    def __init__(self, X_train: np.ndarray, y_train: np.ndarray, imb_ratio: float):
        """Initialization of environment with X_train and y_train.

        :param X_train: Features shaped: [samples, ..., ]
        :type X_train: np.ndarray
        :param y_train: Labels shaped: [samples]
        :type y_train: np.ndarray
        :param imb_ratio: Imbalance ratio of the data
        :type imb_ratio: float

        :returns: None
        :rtype: NoneType
        """
        self._action_spec = BoundedArraySpec(shape=(), dtype=np.int32, minimum=0,
                                             maximum=(len(np.unique(y_train)) - 1), name="action")
        self._observation_spec = ArraySpec(shape=X_train.shape[1:], dtype=X_train.dtype, name="observation")
        self._episode_ended = False

        self.X_train = X_train
        self.y_train = y_train
        self.imb_ratio = imb_ratio  # Imbalance ratio: 0 < imb_ratio < 1
        self.id = np.arange(self.X_train.shape[0])  # List of IDs to connect X and y data

        self.episode_step = 0  # Episode step, resets every episode
        self._state = self.X_train[self.id[self.episode_step]]

    def action_spec(self):
        """
        Definition of the discrete action space.
        1 for the positive/minority class, 0 for the negative/majority class.
        """
        return self._action_spec

    def observation_spec(self):
        """Definition of the continuous state space, e.g. the observations in typical RL environments."""
        return self._observation_spec

    def _reset(self):
        """Shuffles data and returns the first state of the shuffled data to begin training on a new episode."""
        np.random.shuffle(self.id)  # Shuffle the X and y data
        self.episode_step = 0  # Reset episode step counter at the end of every episode
        self._state = self.X_train[self.id[self.episode_step]]
        self._episode_ended = False  # Reset terminal condition

        return ts.restart(self._state)

    def _step(self, action: int):
        """
        Take one step in the environment.
        If the action is correct, the environment will either return 1 or `imb_ratio` depending on the current class.
        If the action is incorrect, the environment will either return -1 or -`imb_ratio` depending on the current class.
        """
        if self._episode_ended:
            # The last action ended the episode. Ignore the current action and start a new episode
            return self.reset()

        env_action = self.y_train[self.id[self.episode_step]]  # The label of the current state
        self.episode_step += 1

        if action == env_action:  # Correct action
            if env_action:  # Minority
                reward = 1  # True Positive
            else:  # Majority
                reward = self.imb_ratio  # True Negative

        else:  # Incorrect action
            if env_action:  # Minority
                reward = -1  # False Negative
                self._episode_ended = True  # Stop episode when minority class is misclassified
            else:  # Majority
                reward = -self.imb_ratio  # False Positive

        if self.episode_step == self.X_train.shape[0] - 1:  # If last step in data
            self._episode_ended = True

        self._state = self.X_train[self.id[self.episode_step]]  # Update state with new datapoint

        if self._episode_ended:
            return ts.termination(self._state, reward)
        else:
            return ts.transition(self._state, reward)
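
# --- Sanity-check sketch (illustrative, not part of the original module) ---
# tf_agents ships a validator that exercises _reset/_step against the declared
# specs; the random features/labels below are placeholders.
if __name__ == "__main__":
    from tf_agents.environments.utils import validate_py_environment

    X = np.random.rand(100, 4).astype(np.float32)       # toy feature matrix
    y = np.random.randint(0, 2, 100).astype(np.int32)   # toy binary labels
    validate_py_environment(ClassifierEnv(X, y, imb_ratio=0.2), episodes=3)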
aionMlopsService.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' # -*- coding: utf-8 -*- # -*- coding: utf-8 -*- import logging logging.getLogger('tensorflow').disabled = True import json import mlflow import mlflow.sklearn import mlflow.sagemaker as mfs # from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split # from sklearn import datasets import time import numpy as np # Load dataset # from sklearn.datasets import load_iris import pickle # Load the pickled model # from matplotlib import pyplot import sys import os import boto3 import subprocess import os.path from os.path import expanduser import platform from pathlib import Path class aionMlopsService: def __init__(self,model,mlflowtosagemakerDeploy,mlflowtosagemakerPushOnly,mlflowtosagemakerPushImageName,mlflowtosagemakerdeployModeluri,experiment_name,mlflow_modelname,awsaccesskey_id,awssecretaccess_key,aws_session_token,mlflow_container_name,aws_region,aws_id,iam_sagemakerfullaccess_arn,sm_app_name,sm_deploy_option,delete_ecr_repository,ecrRepositoryName): try: self.model=model self.mlflowtosagemakerDeploy=mlflowtosagemakerDeploy self.mlflowtosagemakerPushOnly=str(mlflowtosagemakerPushOnly) self.mlflowtosagemakerPushImageName=str(mlflowtosagemakerPushImageName) self.mlflowtosagemakerdeployModeluri=str(mlflowtosagemakerdeployModeluri) self.experiment_name=experiment_name self.mlflow_modelname=mlflow_modelname self.awsaccesskey_id=awsaccesskey_id self.awssecretaccess_key=awssecretaccess_key self.aws_session_token=aws_session_token self.mlflow_container_name=mlflow_container_name self.aws_region=aws_region self.aws_id=aws_id self.iam_sagemakerfullaccess_arn=iam_sagemakerfullaccess_arn self.sm_app_name=sm_app_name self.sm_deploy_option=sm_deploy_option self.delete_ecr_repository=delete_ecr_repository self.ecrRepositoryName=ecrRepositoryName from appbe.dataPath import LOG_LOCATION sagemakerLogLocation = LOG_LOCATION try: os.makedirs(sagemakerLogLocation) except OSError as e: if (os.path.exists(sagemakerLogLocation)): pass else: raise OSError('sagemakerLogLocation error.') self.sagemakerLogLocation=str(sagemakerLogLocation) filename_mlops = 'mlopslog_'+str(int(time.time())) filename_mlops=filename_mlops+'.log' # filename = 'mlopsLog_'+Time() filepath = os.path.join(self.sagemakerLogLocation, filename_mlops) logging.basicConfig(filename=filepath, format='%(message)s',filemode='w') # logging.basicConfig(filename="uq_logging.log", format='%(asctime)s %(message)s',filemode='w') # logging.basicConfig(filename="uq_logging.log", format=' %(message)s',filemode='w') # logging.basicConfig(filename='uq_logging.log', encoding='utf-8', level=logging.INFO) self.log = logging.getLogger('aionMLOps') self.log.setLevel(logging.DEBUG) # mlflow.set_experiment(self.experiment_name) except Exception as e: self.log.info('<!------------- mlflow model INIT Error ---------------> '+str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) 
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def mlflowSetPath(self,path): track_dir=os.path.join(path,'mlruns') uri="file:"+str(Path(track_dir)) return uri #Currently not used this delete ecr repository option def ecr_repository_delete(self,rep_name): # import subprocess client = boto3.client('ecr') repositories = client.describe_repositories() ecr_delete_rep=client.delete_repository(registryId=self.aws_id,repositoryName=self.ecrRepositoryName,force=True) mlflow_ecr_delete=subprocess.run(['aws', 'ecr', 'delete-repository','--repository-name',rep_name,'||','true']) self.log.info('Success: deleted aws ecr repository which contains mlops image.') def check_sm_deploy_status(self,app_name): sage_client = boto3.client('sagemaker', region_name=self.aws_region) endpoint_description = sage_client.describe_endpoint(EndpointName=app_name) endpoint_status = endpoint_description["EndpointStatus"] try: failure_reason=endpoint_description["FailureReason"] self.log.info("sagemaker end point creation failure reason is: "+str(failure_reason)) except: pass endpoint_status=str(endpoint_status) return endpoint_status def invoke_sm_endpoint(self,app_name, input_json): client = boto3.session.Session().client("sagemaker-runtime", self.aws_region) response = client.invoke_endpoint( EndpointName=app_name, Body=input_json, ContentType='application/json; format=pandas-split', ) # preds = response['Body'].read().decode("ascii") preds = response['Body'].read().decode("ascii") preds = json.loads(preds) # print("preds: {}".format(preds)) return preds def predict_sm_app_endpoint(self,X_test): #print(X_test) import pandas as pd prediction=None AWS_ACCESS_KEY_ID=str(self.awsaccesskey_id) AWS_SECRET_ACCESS_KEY=str(self.awssecretaccess_key) AWS_SESSION_TOKEN=str(self.aws_session_token) region = str(self.aws_region) #Existing model deploy options # mlflowtosagemakerPushImageName=str(self.mlflowtosagemakerPushImageName) # mlflowtosagemakerdeployModeluri=str(self.mlflowtosagemakerdeployModeluri) try: import subprocess cmd = 'aws configure set region_name '+region os.system(cmd) cmd = 'aws configure set aws_access_key_id '+AWS_ACCESS_KEY_ID os.system(cmd) cmd = 'aws configure set aws_secret_access_key '+AWS_SECRET_ACCESS_KEY os.system(cmd) ''' aws_region=subprocess.run(['aws', 'configure', 'set','region_name',region]) aws_accesskeyid=subprocess.run(['aws', 'configure', 'set','aws_access_key_id',AWS_ACCESS_KEY_ID]) aws_secretaccesskey=subprocess.run(['aws', 'configure', 'set','aws_secret_access_key',AWS_SECRET_ACCESS_KEY]) ''' except: pass #Create a session for aws communication using aws boto3 lib # s3_client = boto3.client('ecr',aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region) # s3 = boto3.resource('ecr', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key= AWS_SECRET_ACCESS_KEY) session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,aws_session_token=AWS_SESSION_TOKEN,region_name=region) #X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=2) # query_input = pd.DataFrame(X_test).iloc[[1,5]].to_json(orient="split") try: query_input = pd.DataFrame(X_test).to_json(orient="split") #print(query_input) prediction = self.invoke_sm_endpoint(app_name=self.sm_app_name, input_json=query_input) # self.log.info("sagemaker end point Prediction: \n"+str(prediction)) except Exception as e: print(e) return prediction def 
deleteSagemakerApp(self,app_name,region): # import mlflow.sagemaker as mfs # region = 'ap-south-1' # app_name = 'aion-demo-app' mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300) # print("AION mlops sagemaker application endpoint is deleted....\n") self.log.info('AION mlops sagemaker application endpoint is deleted, application name is: '+str(app_name)) def deployModel2sagemaker(self,mlflow_container_name,tag_id,model_path): region = str(self.aws_region) aws_id = str(self.aws_id) iam_sagemakerfullaccess_arn = str(self.iam_sagemakerfullaccess_arn) app_name = str(self.sm_app_name) model_uri = str(model_path) app_status=False mlflow_root_dir = None try: os.chdir(str(self.sagemakerLogLocation)) mlflow_root_dir = os.getcwd() self.log.info('mlflow root dir: '+str(mlflow_root_dir)) except: self.log.info("path issue.") try: c_status=self.check_sm_deploy_status(app_name) #if ((c_status == "Failed") or (c_status == "OutOfService")): if ((c_status == "Failed") or (c_status.lower() == "failed")): app_status=False self.log.info("Sagemaker endpoint status: Failed.\n") mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300) elif ((c_status.lower() == "inservice") or (c_status == "InService")): app_status=True self.log.info("Sagemaker endpoint status: InService. Running sagemaker endpoint name: \n"+str(app_name)) else: app_status=False pass except: # print("deploy status error.\n") pass #aws ecr model app_name should contain only [[a-zA-Z0-9-]] import re if app_name: pattern = re.compile("[A-Za-z0-9-]+") # if found match (entire string matches pattern) if pattern.fullmatch(app_name) is not None: #print("Found match: ") pass else: app_name = 'aion-demo-app' else: app_name = 'aion-demo-app' mlflow_image=mlflow_container_name+':'+tag_id image_url = aws_id + '.dkr.ecr.' + region + '.amazonaws.com/' + mlflow_image deploy_option="create" self.log.info('deploy_option: \n'+str(deploy_option)) if (deploy_option.lower() == "create"): # Other deploy modes: mlflow.sagemaker.DEPLOYMENT_MODE_ADD,mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE if not (app_status): try: mfs.deploy(app_name=app_name,model_uri=model_uri,region_name=region,mode="create",execution_role_arn=iam_sagemakerfullaccess_arn,image_url=image_url) self.log.info('sagemaker endpoint created and model deployed. Application name is: \n'+str(app_name)) except: self.log.info('Creating end point application issue.Please check the connection and aws credentials \n') else: self.log.info('Sagemaker application with user endpoint name already running.Please check. Please delete the old endpoint with same name.\n') elif (deploy_option.lower() == "delete"): # import mlflow.sagemaker as mfs # # region = 'ap-south-1' # # app_name = 'aion-demo-app' # mfs.delete(app_name=app_name,region_name=region, archive=False,synchronous=True, timeout_seconds=300) # print("Mlflow sagemaker application endpoint is deleted....\n") # self.log.info('Mlflow sagemaker application endpoint is deleted, application name is: '+str(app_name)) pass elif (deploy_option.lower() == "add"): pass elif (deploy_option.lower() == "replace"): pass else: pass return app_status def mlflow2sagemaker_deploy(self): self.log.info('<!------------- Inside AION mlops to sagemaker communication and deploy process. 
---------------> ') deploy_status=False app_name = str(self.sm_app_name) self.log.info('Sagemaker Application Name: '+str(app_name)) uri_mlflow=self.mlflowSetPath(self.sagemakerLogLocation) mlflow.set_tracking_uri(uri_mlflow) mlops_trackuri=mlflow.get_tracking_uri() mlops_trackuri=str(mlops_trackuri) self.log.info('mlops tracking uri: '+str(mlops_trackuri)) localhost_deploy=False try: #Loading aion model to deploy in sagemaker mlflow.set_experiment(self.experiment_name) self.log.info('Endpoint Name: '+str(self.experiment_name)) # Assume, the model already loaded from joblib in aionmlflow2smInterface.py file. aionmodel2deploy=self.model # run_id = None # experiment_id=None # Use the loaded pickled model to make predictions # pred = knn_from_pickle.predict(X_test) with mlflow.start_run(run_name='AIONMLOps') as run: # aionmodel2deploy.fit(X_train, y_train) # predictions = aionmodel2deploy.predict(X_test) mlflow.sklearn.log_model(aionmodel2deploy, self.mlflow_modelname) run_id = run.info.run_uuid experiment_id = run.info.experiment_id self.log.info('AION mlops experiment run_id: '+str(run_id)) self.log.info('AION mlops experiment experiment_id: '+str(experiment_id)) self.log.info('AION mlops experiment model_name: '+str(self.mlflow_modelname)) artifact_uri = {mlflow.get_artifact_uri()} # print("1.artifact_uri: \n",artifact_uri) mlflow.end_run() #If we need, we can check the mlflow experiments. # try: # mlflow_client = mlflow.tracking.MlflowClient('./mlruns') # exp_list = mlflow_client.list_experiments() # except: # pass #print("mlflow exp_list: \n",exp_list) mlflow_modelname=str(self.mlflow_modelname) mlops_trackuri=mlops_trackuri.replace('file:','') mlops_trackuri=str(mlops_trackuri) # mlflow_root_dir = os.getcwd() mlflow_root_dir = None try: os.chdir(str(self.sagemakerLogLocation)) mlflow_root_dir = os.getcwd() self.log.info('mlflow root dir: '+str(mlflow_root_dir)) except: self.log.info("path issue.") model_path = 'mlruns/%s/%s/artifacts/%s' % (experiment_id, run_id,self.mlflow_modelname) # model_path=mlops_trackuri+'\\%s\\%s\\artifacts\\%s' % (experiment_id, run_id,mlflow_modelname) self.log.info("local host aion mlops model_path is: "+str(model_path)) time.sleep(2) #print("Environment variable setup in the current working dir for aws sagemaker cli connection... \n") self.log.info('Environment variable setup in the current working dir for aws sagemaker cli connection... 
\n ')
            AWS_ACCESS_KEY_ID = str(self.awsaccesskey_id)
            AWS_SECRET_ACCESS_KEY = str(self.awssecretaccess_key)
            AWS_SESSION_TOKEN = str(self.aws_session_token)
            region = str(self.aws_region)
            # Existing model deploy options
            mlflowtosagemakerPushImageName = str(self.mlflowtosagemakerPushImageName)
            mlflowtosagemakerdeployModeluri = str(self.mlflowtosagemakerdeployModeluri)
            import subprocess
            # Note: the AWS CLI config key is 'region'; the original used 'region_name', which the CLI ignores.
            cmd = 'aws configure set region ' + region
            os.system(cmd)
            cmd = 'aws configure set aws_access_key_id ' + AWS_ACCESS_KEY_ID
            os.system(cmd)
            cmd = 'aws configure set aws_secret_access_key ' + AWS_SECRET_ACCESS_KEY
            os.system(cmd)
            # Create a session for aws communication using the boto3 lib
            session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,
                                    aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                                    aws_session_token=AWS_SESSION_TOKEN,
                                    region_name=region)
            self.log.info('aws environment variable setup done... \n')
            try:
                os.chdir(mlflow_root_dir)
            except FileNotFoundError:
                self.log.info('Directory does not exist. ' + str(mlflow_root_dir))
            except NotADirectoryError:
                self.log.info('model_path is not a directory. ' + str(mlflow_root_dir))
            except PermissionError:
                self.log.info('Issue in permissions to change to model dir. ' + str(mlflow_root_dir))
            mlflow_container_name = str(self.mlflow_container_name)
            mlflow_version = mlflow.__version__
            tag_id = mlflow_version
            if (self.mlflowtosagemakerPushOnly.lower() == "true"):
                self.log.info('Selected option is <Deploy existing model to sagemaker> \n')
                aws_id = str(self.aws_id)
                arn = str(self.iam_sagemakerfullaccess_arn)
                mlflow_image = mlflow_container_name + ':' + tag_id
                image_url = aws_id + '.dkr.ecr.' + region + '.amazonaws.com/' + mlflow_image
                deploy_status = True
                try:
                    model_path = mlflowtosagemakerdeployModeluri
                    # The mlflow docker container command must run in the artifacts->model directory inside mlruns.
                    self.log.info('Deploy existing model container-Model path given by user: ' + str(model_path))
                    try:
                        os.chdir(model_path)
                    except FileNotFoundError:
                        self.log.info('Directory does not exist. ' + str(model_path))
                    except NotADirectoryError:
                        self.log.info('model_path is not a directory. ' + str(model_path))
                    except PermissionError:
                        self.log.info('Issue in permissions to change to model dir. ' + str(model_path))
                    try:
                        mfs.push_image_to_ecr(image=mlflowtosagemakerPushImageName)
                        deploy_status = True
                        self.log.info('AION mlops pushed the docker container to aws ecr. \n ')
                    except:
                        self.log.info("error in pushing existing container to ecr.\n")
                        deploy_status = False
                    time.sleep(2)
                    # Change the working dir back to the mlflow root dir, because deploy needs the full mlruns tree.
                    try:
                        os.chdir(mlflow_root_dir)
                    except FileNotFoundError:
                        self.log.info('model path does not exist. ' + str(mlflow_root_dir))
                    except NotADirectoryError:
                        self.log.info('model path is not a directory. ' + str(mlflow_root_dir))
                    except PermissionError:
                        self.log.info('Issue in permissions to change to model dir. ' + str(mlflow_root_dir))
                    try:
                        if (deploy_status):
                            self.deployModel2sagemaker(mlflowtosagemakerPushImageName, tag_id, mlflowtosagemakerdeployModeluri)
                            self.log.info('AION creates the docker container and pushes the container into aws ecr.. ')
                            time.sleep(2)
                    except:
                        self.log.info('AION deploy error. Check connection and aws config parameters. ')
                        deploy_status = False
                except Exception as e:
                    self.log.info('AION mlops failed to push the docker container to aws ecr; check the configuration parameters. \n' + str(e))
            elif (self.mlflowtosagemakerPushOnly.lower() == "false"):
                if (self.mlflowtosagemakerDeploy.lower() == "true"):
                    self.log.info('Selected option is <Create and Deploy model> \n')
                    deploy_status = True
                    try:
                        # NOTE: model_path is expected to be set earlier in this method for this branch.
                        try:
                            os.chdir(model_path)
                        except FileNotFoundError:
                            self.log.info('Directory does not exist. ' + str(model_path))
                        except NotADirectoryError:
                            self.log.info('model_path is not a directory. ' + str(model_path))
                        except PermissionError:
                            self.log.info('Issue in permissions to change to model dir. ' + str(model_path))
                        try:
                            mlflow_container_push = subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container', '--build', '--push', '--container', mlflow_container_name])
                            self.log.info('AION mlops creates the docker container and pushes the container into aws ecr.. ')
                            deploy_status = True
                            time.sleep(2)
                        except:
                            self.log.info('error in pushing the aion model container to sagemaker; please check the connection between the local host and the aws server.')
                            deploy_status = False
                        self.log.info('Now deploying the model container to sagemaker starts....\n ')
                        # Once the docker push completes, go back to the mlflow parent dir for deployment.
                        try:
                            os.chdir(mlflow_root_dir)
                        except FileNotFoundError:
                            self.log.info('model_path does not exist. ' + str(mlflow_root_dir))
                        except NotADirectoryError:
                            self.log.info('model_path is not a directory. ' + str(mlflow_root_dir))
                        except PermissionError:
                            self.log.info('Issue in permissions to change to model dir. ' + str(mlflow_root_dir))
                        try:
                            if (deploy_status):
                                self.deployModel2sagemaker(mlflow_container_name, tag_id, model_path)
                        except:
                            self.log.info('mlops deploy error. Check connection.')
                            deploy_status = False
                    except Exception as e:
                        exc = {"status": "FAIL", "message": str(e).strip('"')}
                        out_exc = json.dumps(exc)
                        self.log.info('mlflow failed to create the docker container; please check the aws iam and ecr permission setup, and the aws id, access_key and secret key values for aion.\n')
                elif (self.mlflowtosagemakerDeploy.lower() == "false"):
                    deploy_status = False
                    localhost_deploy = True
                    self.log.info('Selected option is <Create AION mlops container in local host> \n')
                    self.log.info("User selected the create-deploy sagemaker option as False,")
                    self.log.info("so the AION mlops-sagemaker container is created locally but not pushed into aws ecr or deployed in sagemaker. Check the container in the docker repository. ")
                    try:
                        # The AION mlops docker container command must run in the artifacts->model directory inside mlruns.
                        try:
                            os.chdir(model_path)
                            self.log.info('After change to AION mlops model dir, cwd: ' + str(model_path))
                        except FileNotFoundError:
                            self.log.info('Directory does not exist. ' + str(model_path))
                        except NotADirectoryError:
                            self.log.info('model_path is not a directory. ' + str(model_path))
                        except PermissionError:
                            self.log.info('Issue in permissions to change to model dir. ' + str(model_path))
                        try:
                            if not (deploy_status):
                                mlflow_container_local = subprocess.run(['mlflow', 'sagemaker', 'build-and-push-container', '--build', '--no-push', '--container', mlflow_container_name])
                                self.log.info('AION creates a local-host based docker container and pushes it to the local docker repository. Check with the <docker images> command.\n ')
                                localhost_deploy = True
                                time.sleep(2)
                        except:
                            self.log.info('error in creating the aion model container locally; please check the docker setup.')
                            deploy_status = False
                            localhost_deploy = False
                        self.log.info('AION mlops creates the docker container and stores it locally... ')
                        time.sleep(2)
                    except Exception as e:
                        localhost_deploy = False
                        self.log.info('AION mlops failed to create the docker container on the local machine.\n' + str(e))
                else:
                    self.log.info('Deploy option not selected, please check. ')
                    localhost_deploy = False
                    deploy_status = False
            else:
                pass
            localhost_container_status = "Notdeployed"
            mlflow2sm_deploy_status = "Notdeployed"
            if localhost_deploy:
                localhost_container_status = "success"
                mlflow2sm_deploy_status = "Notdeployed"
                self.log.info("AION created the local docker container successfully. Please check in the docker repository.")
            if (deploy_status):
                # Finally check whether the mlops model is deployed to sagemaker or not.
                app_name = str(self.sm_app_name)
                deploy_s = self.check_sm_deploy_status(app_name)
                if (deploy_s == "InService"):
                    self.log.info('AION mlops model is deployed at aws sagemaker; use the application name (app_name) and region to access.\n' + str(app_name))
                    mlflow2sm_deploy_status = "success"
                    localhost_container_status = "Notdeployed"
                else:
                    self.log.info('AION mlops model could not be deployed at aws sagemaker.\n')
                    mlflow2sm_deploy_status = "failed"
                    localhost_container_status = "Notdeployed"
            return mlflow2sm_deploy_status, localhost_container_status
        except Exception as inst:
            exc = {"status": "FAIL", "message": str(inst).strip('"')}
            out_exc = json.dumps(exc)
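A minimal, hedged sketch of the credential and container-build flow used above; the region and container name are placeholder assumptions, and it requires the mlflow CLI, Docker, and resolvable AWS credentials on the machine.

# Sketch only: placeholder region/container name, not values from this repo.
import subprocess

import boto3

region = "ap-south-1"        # assumption: any valid AWS region works here
container = "aion-mlops"     # assumption: local container name

# A boto3 session resolves credentials but deploys nothing by itself.
session = boto3.Session(region_name=region)
print(session.client("sts").get_caller_identity()["Account"])

# Mirrors the '--no-push' branch above: build the MLflow SageMaker image locally.
subprocess.run(
    ["mlflow", "sagemaker", "build-and-push-container",
     "--build", "--no-push", "--container", container],
    check=True,
)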
__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
baseline.py
import joblib
import sys
import math
import time
import json
import argparse
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

def mltesting(modelfile, datafile, features, target):
    model = joblib.load(modelfile)
    ProblemName = model.__class__.__name__
    if ProblemName in ['LogisticRegression', 'SGDClassifier', 'SVC', 'DecissionTreeClassifier', 'RandomForestClassifier', 'GaussianNB', 'KNeighborsClassifier', 'DecisionTreeClassifier', 'GradientBoostingClassifier', 'XGBClassifier', 'LGBMClassifier', 'CatBoostClassifier']:
        Problemtype = 'Classification'
    elif ProblemName in ['LinearRegression', 'Lasso', 'Ridge', 'DecisionTreeRegressor', 'RandomForestRegressor', 'GradientBoostingRegressor', 'XGBRegressor', 'LGBMRegressor', 'CatBoostRegressor']:
        Problemtype = 'Regression'
    else:
        Problemtype = 'Unknown'
    if Problemtype == 'Classification':
        Params = model.get_params()
        try:
            df = pd.read_csv(datafile, encoding='utf-8', skipinitialspace=True)
            if ProblemName in ['LogisticRegression', 'DecisionTreeClassifier', 'RandomForestClassifier', 'GaussianNB', 'KNeighborsClassifier', 'GradientBoostingClassifier', 'SVC']:
                features = model.feature_names_in_
            elif ProblemName == 'XGBClassifier':
                features = model.get_booster().feature_names
            elif ProblemName == 'LGBMClassifier':
                features = model.feature_name_
            elif ProblemName == 'CatBoostClassifier':
                features = model.feature_names_
            modelfeatures = features
            dfp = df[modelfeatures]
            tar = target
            target = df[tar]
            predic = model.predict(dfp)
            output = {}
            matrixconfusion = pd.DataFrame(confusion_matrix(predic, target))
            matrixconfusion = matrixconfusion.to_json(orient='index')
            classificationreport = pd.DataFrame(classification_report(target, predic, output_dict=True)).transpose()
            classificationreport = round(classificationreport, 2)
            classificationreport = classificationreport.to_json(orient='index')
            output["Precision"] = "%.2f" % precision_score(target, predic, average='weighted')
            output["Recall"] = "%.2f" % recall_score(target, predic, average='weighted')
            output["Accuracy"] = "%.2f" % accuracy_score(target, predic)
            output["ProblemName"] = ProblemName
            output["Status"] = "Success"
            output["Params"] = Params
            output["Problemtype"] = Problemtype
            output["Confusionmatrix"] = matrixconfusion
            output["classificationreport"] = classificationreport
            # Time prediction latency for 1, 10 and 100 records (5 repeats each).
            # The values are milliseconds; the original labelled them 'Seconds'.
            start = time.time()
            for i in range(0, 5):
                predic1 = model.predict(dfp.head(1))
            end = time.time()
            timetaken = (round((end - start) * 1000, 2), 'Milliseconds')
            start1 = time.time()
            for i in range(0, 5):
                predic2 = model.predict(dfp.head(10))
            end1 = time.time()
            timetaken1 = (round((end1 - start1) * 1000, 2), 'Milliseconds')
            start2 = time.time()
            for i in range(0, 5):
                predic3 = model.predict(dfp.head(100))
            end2 = time.time()
            timetaken2 = (round((end2 - start2) * 1000, 2), 'Milliseconds')
            output["onerecord"] = timetaken
            output["tenrecords"] = timetaken1
            output["hundrecords"] = timetaken2
            print(json.dumps(output))
        except Exception as e:
            output = {}
            output['Problemtype'] = 'Classification'
            output['Status'] = "Fail"
            output["ProblemName"] = ProblemName
            output["Msg"] = 'Detected Model : {} \\n Problem Type : Classification \\n Error : {}'.format(ProblemName, str(e).replace('"', '//"').replace('\n', '\\n'))
            print(output["Msg"])
            print(json.dumps(output))
    elif Problemtype == 'Regression':
        Params = model.get_params()
        try:
            df = pd.read_csv(datafile, encoding='utf-8', skipinitialspace=True)
            if ProblemName in ['LinearRegression', 'Lasso', 'Ridge', 'DecisionTreeRegressor', 'RandomForestRegressor', 'GaussianNB', 'KNeighborsRegressor', 'GradientBoostingRegressor']:
                features = model.feature_names_in_
            elif ProblemName == 'XGBRegressor':
                features = model.get_booster().feature_names
            elif ProblemName == 'LGBMRegressor':
                features = model.feature_name_
            elif ProblemName == 'CatBoostRegressor':
                features = model.feature_names_
            modelfeatures = features
            dfp = df[modelfeatures]
            tar = target
            target = df[tar]
            predict = model.predict(dfp)
            mse = mean_squared_error(target, predict)
            output = {}
            output["MSE"] = "%.2f" % mse
            output["MAE"] = "%.2f" % mean_absolute_error(target, predict)
            output["RMSE"] = "%.2f" % math.sqrt(mse)
            output["R2"] = "%.2f" % r2_score(target, predict, multioutput='variance_weighted')
            output["ProblemName"] = ProblemName
            output["Problemtype"] = Problemtype
            output["Params"] = Params
            output['Status'] = 'Success'
            start = time.time()
            predic1 = model.predict(dfp.head(1))
            end = time.time()
            timetaken = (round((end - start) * 1000, 2), 'Milliseconds')
            start1 = time.time()
            predic2 = model.predict(dfp.head(10))
            end1 = time.time()
            timetaken1 = (round((end1 - start1) * 1000, 2), 'Milliseconds')
            start2 = time.time()
            predic3 = model.predict(dfp.head(100))
            end2 = time.time()
            timetaken2 = (round((end2 - start2) * 1000, 2), 'Milliseconds')
            output["onerecord"] = timetaken
            output["tenrecords"] = timetaken1
            output["hundrecords"] = timetaken2
            print(json.dumps(output))
        except Exception as e:
            output = {}
            output['Problemtype'] = 'Regression'
            output['Status'] = 'Fail'
            output["ProblemName"] = ProblemName
            output["Msg"] = 'Detected Model : {} \\n Problem Type : Regression \\n Error : {}'.format(ProblemName, str(e).replace('"', '//"').replace('\n', '\\n'))
            print(json.dumps(output))
    else:
        output = {}
        output['Problemtype'] = 'Unknown'
        output['Status'] = 'Fail'
        output['Params'] = ''
        output["ProblemName"] = ProblemName
        output["Msg"] = 'Detected Model : {} \\n Error : {}'.format(ProblemName, 'Model not supported')
        print(json.dumps(output))
    return (json.dumps(output))

def baseline_testing(modelFile, csvFile, features, target):
    features = [x.strip() for x in features.split(',')]
    return mltesting(modelFile, csvFile, features, target)
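A hedged usage sketch for baseline_testing above; the module path, model pickle, CSV file, feature list, and target name are placeholders, not artifacts shipped with this repo.

from baseline import baseline_testing  # assumption: module importable as 'baseline'

result_json = baseline_testing(
    modelFile="model.pkl",     # placeholder: a joblib-dumped sklearn model
    csvFile="data.csv",        # placeholder: data containing the model's features
    features="f1, f2, f3",     # comma-separated; whitespace is stripped
    target="label",
)
print(result_json)             # JSON string with metrics and latency timings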
item_rating.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import numpy as np
import os
import datetime, time, timeit
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import pickle
import logging

class recommendersystem():
    def __init__(self, features, svd_params):
        self.features = features
        self.svd_input = svd_params
        self.log = logging.getLogger('eion')
        print("recommendersystem starts \n")

    # Extract a dict's (key, value) pair; note this returns after the first item only.
    def extract_params(self, dict):
        self.dict = dict
        for k, v in self.dict.items():
            return k, v

    def recommender_model(self, df, outputfile):
        from sklearn.metrics.pairwise import cosine_similarity
        from utils.file_ops import save_csv
        USER_ITEM_MATRIX = 'user_item_matrix'
        ITEM_SIMILARITY_MATRIX = 'item_similarity_matrix'
        selectedColumns = self.features.split(',')
        data = pd.DataFrame()
        for i in range(0, len(selectedColumns)):
            data[selectedColumns[i]] = df[selectedColumns[i]]
        dataset = data
        self.log.info('-------> Top(5) Rows')
        self.log.info(data.head(5))
        start = time.time()
        self.log.info('\n----------- Recommender System Training Starts -----------')
        # Task 11190: recommender system changes Start ---Usnish
        # selectedColumns is expected to be e.g. ['userId', 'movieId', 'rating']
        df_eda = df.groupby(selectedColumns[1]).agg(mean_rating=(selectedColumns[2], 'mean'), number_of_ratings=(selectedColumns[2], 'count')).reset_index()
        self.log.info('-------> Top 10 most rated Items:')
        self.log.info(df_eda.sort_values(by='number_of_ratings', ascending=False).head(10))
        matrix = data.pivot_table(index=selectedColumns[1], columns=selectedColumns[0], values=selectedColumns[2])
        relative_file = os.path.join(outputfile, 'data', USER_ITEM_MATRIX + '.csv')
        matrix.to_csv(relative_file)
        item_similarity_cosine = cosine_similarity(matrix.fillna(0))
        item_similarity_cosine = pd.DataFrame(item_similarity_cosine, columns=pd.Series([i + 1 for i in range(item_similarity_cosine.shape[0])], name='ItemId'), index=pd.Series([i + 1 for i in range(item_similarity_cosine.shape[0])], name='ItemId'))
        self.log.info('---------> Item-Item Similarity matrix created:')
        self.log.info(item_similarity_cosine.head(5))
        relative_file = os.path.join(outputfile, 'data', ITEM_SIMILARITY_MATRIX + '.csv')
        save_csv(item_similarity_cosine, relative_file)
        # Task 11190: recommender system changes End ---Usnish
        executionTime = time.time() - start
        self.log.info("------->Execution Time: " + str(executionTime))
        self.log.info('----------- Recommender System Training End -----------\n')
        return "filename", matrix, "NA", "", ""
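A standalone sketch of the item-item similarity step inside recommender_model, run on a tiny synthetic ratings frame; the column names are assumptions matching the 'userId/movieId/rating' comment above.

import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity

ratings = pd.DataFrame({
    "userId":  [1, 1, 2, 2, 3],
    "movieId": [10, 20, 10, 30, 20],
    "rating":  [4.0, 5.0, 3.0, 2.0, 4.5],
})
# Pivot to an item x user matrix, then compare items by cosine similarity,
# exactly as recommender_model does after its EDA step.
matrix = ratings.pivot_table(index="movieId", columns="userId", values="rating")
item_sim = cosine_similarity(matrix.fillna(0))
print(pd.DataFrame(item_sim, index=matrix.index, columns=matrix.index))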
__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
text_similarity.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import pickle
import pandas as pd
import sys
import time
import os
from os.path import expanduser
import platform
import logging
import tensorflow as tf
import tensorflow.keras.backend as K
from sklearn.preprocessing import binarize
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
# note: the local cosine_similarity helper below shadows this sklearn import
from sklearn.metrics.pairwise import cosine_similarity, cosine_distances
from tensorflow.keras import preprocessing
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras import layers, utils, callbacks, optimizers, regularizers

## Keras subclassing based siamese network
class siameseNetwork(Model):
    def __init__(self, activation, inputShape, num_iterations):
        # super().__init__() must run before setting attributes on a Keras Model subclass.
        super(siameseNetwork, self).__init__()
        self.activation = activation
        self.log = logging.getLogger('eion')
        i1 = layers.Input(shape=inputShape)
        i2 = layers.Input(shape=inputShape)
        featureExtractor = self.build_feature_extractor(inputShape, num_iterations)
        f1 = featureExtractor(i1)
        f2 = featureExtractor(i2)
        # distance vector
        distance = layers.Concatenate()([f1, f2])
        cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
        c_loss = cosine_loss(f1, f2)
        similarity = tf.keras.layers.Dot(axes=1, normalize=True)([f1, f2])
        outputs = layers.Dense(1, activation="sigmoid")(distance)
        self.model = Model(inputs=[i1, i2], outputs=outputs)

    ## Build dense sequential layers
    def build_feature_extractor(self, inputShape, num_iterations):
        layers_config = [layers.Input(inputShape)]
        for i, n_units in enumerate(num_iterations):
            layers_config.append(layers.Dense(n_units))
            layers_config.append(layers.Dropout(0.2))
            layers_config.append(layers.BatchNormalization())
            layers_config.append(layers.Activation(self.activation))
        model = Sequential(layers_config, name='feature_extractor')
        return model

    def call(self, x):
        return self.model(x)

def euclidean_distance(vectors):
    (f1, f2) = vectors
    sumSquared = K.sum(K.square(f1 - f2), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sumSquared, K.epsilon()))

def cosine_similarity(vectors):
    (f1, f2) = vectors
    f1 = K.l2_normalize(f1, axis=-1)
    f2 = K.l2_normalize(f2, axis=-1)
    return K.mean(f1 * f2, axis=-1, keepdims=True)

def cos_dist_output_shape(shapes):
    shape1, shape2 = shapes
    return (shape1[0], 1)

class eion_similarity_siamese:
    def __init__(self):
        self.log = logging.getLogger('eion')

    def siamese_model(self, df, col1, col2, targetColumn, conf, pipe, deployLocation, iterName, iterVersion, testPercentage, predicted_data_file):
        try:
            self.log.info('-------> Read Embedded File')
            home = expanduser("~")
            if platform.system() == 'Windows':
                modelsPath = os.path.join(home, 'AppData', 'Local', 'HCLT', 'AION', 'PreTrainedModels', 'TextSimilarity')
            else:
                modelsPath = os.path.join(home, 'HCLT', 'AION', 'PreTrainedModels', 'TextSimilarity')
            if os.path.isdir(modelsPath) == False:
                os.makedirs(modelsPath)
            embedding_file_path = os.path.join(modelsPath, 'glove.6B.100d.txt')
            if not os.path.exists(embedding_file_path):
                import urllib.request
                import zipfile
                location = modelsPath
                local_file_path = os.path.join(location, "glove.6B.zip")
                file_test, header_test = urllib.request.urlretrieve('http://nlp.stanford.edu/data/wordvecs/glove.6B.zip', local_file_path)
                with zipfile.ZipFile(local_file_path, 'r') as zip_ref:
                    zip_ref.extractall(location)
                os.unlink(os.path.join(location, "glove.6B.zip"))
                # Keep only the 100d vectors; remove the other sizes.
                for unused in ("glove.6B.50d.txt", "glove.6B.200d.txt", "glove.6B.300d.txt"):
                    if os.path.isfile(os.path.join(location, unused)):
                        os.unlink(os.path.join(location, unused))
            X = df[[col1, col2]]
            Y = df[targetColumn]
            self.log.info('\n-------------- Test Train Split ----------------')
            if testPercentage == 0:
                # Use the full dataset for both train and test when no split is requested.
                X_train, y_train = X, Y
                X_test, y_test = X, Y
            else:
                testSize = testPercentage / 100
                self.log.info('-------> Split Type: Random Split')
                self.log.info('-------> Test Percentage: ' + str(testSize))
                X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=testSize)
                self.log.info('-------> Train Data Shape: ' + str(X_train.shape) + ' ---------->')
                self.log.info('-------> Test Data Shape: ' + str(X_test.shape) + ' ---------->')
            self.log.info('-------------- Test Train Split End ----------------\n')
            self.log.info('\n-------------- Train Validate Split ----------------')
            X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.20, random_state=42)
            self.log.info('-------> Train Data Shape: ' + str(X_train.shape) + ' ---------->')
            self.log.info('-------> Validate Data Shape: ' + str(X_val.shape) + ' ---------->')
            self.log.info('-------------- Train Validate Split End----------------\n')
            self.log.info('Status:- |... Train / test split done: ' + str(100 - testPercentage) + '% train,' + str(testPercentage) + '% test')
            train_sentence1 = pipe.texts_to_sequences(X_train[col1].values)
            train_sentence2 = pipe.texts_to_sequences(X_train[col2].values)
            val_sentence1 = pipe.texts_to_sequences(X_val[col1].values)
            val_sentence2 = pipe.texts_to_sequences(X_val[col2].values)
            len_vec = [len(sent_vec) for sent_vec in train_sentence1]
            max_len = np.max(len_vec)
            len_vec = [len(sent_vec) for sent_vec in train_sentence2]
            if (max_len < np.max(len_vec)):
                max_len = np.max(len_vec)
            train_sentence1 = pad_sequences(train_sentence1, maxlen=max_len, padding='post')
            train_sentence2 = pad_sequences(train_sentence2, maxlen=max_len, padding='post')
            val_sentence1 = pad_sequences(val_sentence1, maxlen=max_len, padding='post')
            val_sentence2 = pad_sequences(val_sentence2, maxlen=max_len, padding='post')
            y_train = y_train.values
            y_val = y_val.values
            activation = str(conf['activation'])
            n_epoch = int(conf['num_epochs'])
            batch_size = int(conf['batch_size'])
            similarityIndex = conf['similarityIndex']
            model = siameseNetwork(activation, inputShape=train_sentence1.shape[1], num_iterations=[10])
            model.compile(
                loss="binary_crossentropy",
                optimizer=optimizers.Adam(learning_rate=0.0001),
                metrics=["accuracy"])
            es = callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1, restore_best_weights=True)
            rlp = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, min_lr=1e-10, mode='min', verbose=1)
            model.fit([train_sentence1, train_sentence2], y_train.reshape(-1, 1),
                      epochs=n_epoch, batch_size=batch_size,
                      validation_data=([val_sentence1, val_sentence2], y_val.reshape(-1, 1)), callbacks=[es, rlp])
            scores = model.evaluate([val_sentence1, val_sentence2], y_val.reshape(-1, 1), verbose=0)
            self.log.info('-------> Model Score Matrix: Accuracy')
            self.log.info('-------> Model Score (Validate Data) : ' + str(scores[1]))
            self.log.info('Status:- |... Algorithm applied: SIAMESE')
            self.log.info('-------> similarityIndex : ' + str(similarityIndex))
            # Evaluate once on the test split (the original repeated this block verbatim).
            test_sentence1 = pipe.texts_to_sequences(X_test[col1].values)
            test_sentence2 = pipe.texts_to_sequences(X_test[col2].values)
            test_sentence1 = pad_sequences(test_sentence1, maxlen=max_len, padding='post')
            test_sentence2 = pad_sequences(test_sentence2, maxlen=max_len, padding='post')
            prediction = model.predict([test_sentence1, test_sentence2])
            prediction = np.where(prediction > similarityIndex, 1, 0)
            rocauc_sco = roc_auc_score(y_test, prediction)
            acc_sco = accuracy_score(y_test, prediction)
            predict_df = pd.DataFrame()
            predict_df['actual'] = y_test
            predict_df['predict'] = prediction
            predict_df.to_csv(predicted_data_file)
            self.log.info("predict_df: \n" + str(predict_df))
            sco = acc_sco
            self.log.info('-------> Test Data Accuracy Score : ' + str(acc_sco))
            self.log.info('Status:- |... Testing Score: ' + str(acc_sco))
            self.log.info('-------> Test Data ROC AUC Score : ' + str(rocauc_sco))
            matrix = '"Accuracy":' + str(acc_sco) + ',"ROC AUC":' + str(rocauc_sco)
            prediction = model.predict([train_sentence1, train_sentence2])
            prediction = np.where(prediction > similarityIndex, 1, 0)
            train_rocauc_sco = roc_auc_score(y_train, prediction)
            train_acc_sco = accuracy_score(y_train, prediction)
            self.log.info('-------> Train Data Accuracy Score : ' + str(train_acc_sco))
            self.log.info('-------> Train Data ROC AUC Score : ' + str(train_rocauc_sco))
            trainmatrix = '"Accuracy":' + str(train_acc_sco) + ',"ROC AUC":' + str(train_rocauc_sco)
            model_tried = '{"Model":"SIAMESE","Score":' + str(sco) + '}'
            saved_model = 'textsimilarity_' + iterName + '_' + iterVersion
            # The model uses the Keras subclassing API, so save it as a SavedModel
            # directory rather than a single .h5 file.
            filename = os.path.join(deployLocation, 'model', 'textsimilarity_' + iterName + '_' + iterVersion)
            model.save(filename)
            model_name = 'SIAMESE MODEL'
            return (model_name, scores[1], matrix, trainmatrix, model_tried, saved_model, filename, max_len, similarityIndex)
        except Exception as inst:
            self.log.info("SIAMESE failed " + str(inst))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
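A minimal sketch of the tokenize, pad, and threshold steps the siamese flow above relies on, with a toy Tokenizer standing in for the fitted pipe object (an assumption) and fabricated score values used purely to show the thresholding.

import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

tok = Tokenizer()
tok.fit_on_texts(["how old are you", "what is your age"])
seqs = tok.texts_to_sequences(["how old are you", "what is your age"])
padded = pad_sequences(seqs, maxlen=6, padding="post")

# The siamese head emits a similarity score in [0, 1]; scores above the
# configured similarityIndex become class 1, as in the code above.
scores = np.array([[0.83], [0.41]])          # placeholder model outputs
labels = np.where(scores > 0.5, 1, 0)        # 0.5 stands in for similarityIndex
print(padded, labels.ravel())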
pipelines.py
import itertools
import logging
from typing import Optional, Dict, Union

from nltk import sent_tokenize
import torch
from transformers import (
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    PreTrainedModel,
    PreTrainedTokenizer,
)

logger = logging.getLogger(__name__)

class QGPipeline:
    """Poor man's QG pipeline"""
    def __init__(self, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, ans_model: PreTrainedModel, ans_tokenizer: PreTrainedTokenizer, qg_format: str, use_cuda: bool):
        self.model = model
        self.tokenizer = tokenizer
        self.ans_model = ans_model
        self.ans_tokenizer = ans_tokenizer
        self.qg_format = qg_format
        self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
        self.model.to(self.device)
        if self.ans_model is not self.model:
            self.ans_model.to(self.device)
        assert self.model.__class__.__name__ in ["T5ForConditionalGeneration", "BartForConditionalGeneration"]
        if "T5ForConditionalGeneration" in self.model.__class__.__name__:
            self.model_type = "t5"
        else:
            self.model_type = "bart"

    def __call__(self, inputs: str):
        inputs = " ".join(inputs.split())
        sents, answers = self._extract_answers(inputs)
        flat_answers = list(itertools.chain(*answers))
        if len(flat_answers) == 0:
            return []
        if self.qg_format == "prepend":
            qg_examples = self._prepare_inputs_for_qg_from_answers_prepend(inputs, answers)
        else:
            qg_examples = self._prepare_inputs_for_qg_from_answers_hl(sents, answers)
        qg_inputs = [example['source_text'] for example in qg_examples]
        questions = self._generate_questions(qg_inputs)
        output = [{'answer': example['answer'], 'question': que} for example, que in zip(qg_examples, questions)]
        return output

    def _generate_questions(self, inputs):
        inputs = self._tokenize(inputs, padding=True, truncation=True)
        outs = self.model.generate(
            input_ids=inputs['input_ids'].to(self.device),
            attention_mask=inputs['attention_mask'].to(self.device),
            max_length=32,
            num_beams=4,
        )
        questions = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]
        return questions

    def _extract_answers(self, context):
        sents, inputs = self._prepare_inputs_for_ans_extraction(context)
        inputs = self._tokenize(inputs, padding=True, truncation=True)
        outs = self.ans_model.generate(
            input_ids=inputs['input_ids'].to(self.device),
            attention_mask=inputs['attention_mask'].to(self.device),
            max_length=32,
        )
        dec = [self.ans_tokenizer.decode(ids, skip_special_tokens=False) for ids in outs]
        answers = [item.split('<sep>') for item in dec]
        answers = [i[:-1] for i in answers]
        return sents, answers

    def _tokenize(self, inputs, padding=True, truncation=True, add_special_tokens=True, max_length=512):
        inputs = self.tokenizer.batch_encode_plus(
            inputs,
            max_length=max_length,
            add_special_tokens=add_special_tokens,
            truncation=truncation,
            padding="max_length" if padding else False,
            pad_to_max_length=padding,
            return_tensors="pt"
        )
        return inputs

    def _prepare_inputs_for_ans_extraction(self, text):
        sents = sent_tokenize(text)
        inputs = []
        for i in range(len(sents)):
            source_text = "extract answers:"
            for j, sent in enumerate(sents):
                if i == j:
                    sent = "<hl> %s <hl>" % sent
                source_text = "%s %s" % (source_text, sent)
                source_text = source_text.strip()
            if self.model_type == "t5":
                source_text = source_text + " </s>"
            inputs.append(source_text)
        return sents, inputs

    def _prepare_inputs_for_qg_from_answers_hl(self, sents, answers):
        inputs = []
        for i, answer in enumerate(answers):
            if len(answer) == 0:
                continue
            for answer_text in answer:
                sent = sents[i]
                sents_copy = sents[:]
                answer_text = answer_text.strip()
                ans_start_idx = 0
                # ans_start_idx = sent.index(answer_text)
                # if answer_text in sent:
                #     ans_start_idx = sent.index(answer_text)
                # else:
                #     continue
                sent = f"{sent[:ans_start_idx]} <hl> {answer_text} <hl> {sent[ans_start_idx + len(answer_text):]}"
                sents_copy[i] = sent
                source_text = " ".join(sents_copy)
                source_text = f"generate question: {source_text}"
                if self.model_type == "t5":
                    source_text = source_text + " </s>"
                inputs.append({"answer": answer_text, "source_text": source_text})
        return inputs

    def _prepare_inputs_for_qg_from_answers_prepend(self, context, answers):
        flat_answers = list(itertools.chain(*answers))
        examples = []
        for answer in flat_answers:
            source_text = f"answer: {answer} context: {context}"
            if self.model_type == "t5":
                source_text = source_text + " </s>"
            examples.append({"answer": answer, "source_text": source_text})
        return examples

class MultiTaskQAQGPipeline(QGPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def __call__(self, inputs: Union[Dict, str]):
        if type(inputs) is str:
            # do qg
            return super().__call__(inputs)
        else:
            # do qa
            return self._extract_answer(inputs["question"], inputs["context"])

    def _prepare_inputs_for_qa(self, question, context):
        source_text = f"question: {question} context: {context}"
        if self.model_type == "t5":
            source_text = source_text + " </s>"
        return source_text

    def _extract_answer(self, question, context):
        source_text = self._prepare_inputs_for_qa(question, context)
        inputs = self._tokenize([source_text], padding=False)
        outs = self.model.generate(
            input_ids=inputs['input_ids'].to(self.device),
            attention_mask=inputs['attention_mask'].to(self.device),
            max_length=16,
        )
        answer = self.tokenizer.decode(outs[0], skip_special_tokens=True)
        return answer

class E2EQGPipeline:
    def __init__(self, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, use_cuda: bool):
        self.model = model
        self.tokenizer = tokenizer
        self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
        self.model.to(self.device)
        assert self.model.__class__.__name__ in ["T5ForConditionalGeneration", "BartForConditionalGeneration"]
        if "T5ForConditionalGeneration" in self.model.__class__.__name__:
            self.model_type = "t5"
        else:
            self.model_type = "bart"
        self.default_generate_kwargs = {
            "max_length": 256,
            "num_beams": 4,
            "length_penalty": 1.5,
            "no_repeat_ngram_size": 3,
            "early_stopping": True,
        }

    def __call__(self, context: str, **generate_kwargs):
        inputs = self._prepare_inputs_for_e2e_qg(context)
        # TODO: when overriding default_generate_kwargs, all other arguments need to be passed;
        # find a better way to do this
        if not generate_kwargs:
            generate_kwargs = self.default_generate_kwargs
        input_length = inputs["input_ids"].shape[-1]
        outs = self.model.generate(
            input_ids=inputs['input_ids'].to(self.device),
            attention_mask=inputs['attention_mask'].to(self.device),
            **generate_kwargs
        )
        prediction = self.tokenizer.decode(outs[0], skip_special_tokens=True)
        questions = prediction.split("<sep>")
        questions = [question.strip() for question in questions[:-1]]
        return questions

    def _prepare_inputs_for_e2e_qg(self, context):
        source_text = f"generate questions: {context}"
        if self.model_type == "t5":
            source_text = source_text + " </s>"
        inputs = self._tokenize([source_text], padding=False)
        return inputs

    def _tokenize(self, inputs, padding=True, truncation=True, add_special_tokens=True, max_length=512):
        inputs = self.tokenizer.batch_encode_plus(
            inputs,
            max_length=max_length,
            add_special_tokens=add_special_tokens,
            truncation=truncation,
            padding="max_length" if padding else False,
            pad_to_max_length=padding,
            return_tensors="pt"
        )
        return inputs

SUPPORTED_TASKS = {
    "question-generation": {
        "impl": QGPipeline,
        "default": {
            "model": "valhalla/t5-small-qg-hl",
            "ans_model": "valhalla/t5-small-qa-qg-hl",
        }
    },
    "multitask-qa-qg": {
        "impl": MultiTaskQAQGPipeline,
        "default": {
            "model": "valhalla/t5-small-qa-qg-hl",
        }
    },
    "e2e-qg": {
        "impl": E2EQGPipeline,
        "default": {
            "model": "valhalla/t5-small-e2e-qg",
        }
    }
}

def pipeline(
    task: str,
    model: Optional[Union[str, PreTrainedModel]] = None,
    tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
    qg_format: Optional[str] = "highlight",
    ans_model: Optional[Union[str, PreTrainedModel]] = None,
    ans_tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
    use_cuda: Optional[bool] = True,
    **kwargs,
):
    # Retrieve the task
    if task not in SUPPORTED_TASKS:
        raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys())))
    targeted_task = SUPPORTED_TASKS[task]
    task_class = targeted_task["impl"]
    # Use the default model/config/tokenizer for the task if no model is provided
    if model is None:
        model = targeted_task["default"]["model"]
    # Try to infer the tokenizer from the model or config name (if provided as str)
    if tokenizer is None:
        if isinstance(model, str):
            tokenizer = model
        else:
            # Impossible to guess what the right tokenizer is here
            raise Exception(
                "Impossible to guess which tokenizer to use. "
                "Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer."
            )
    # Instantiate tokenizer if needed
    if isinstance(tokenizer, (str, tuple)):
        if isinstance(tokenizer, tuple):
            # For a tuple we have (tokenizer name, {kwargs})
            tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1])
        else:
            tokenizer = AutoTokenizer.from_pretrained(tokenizer)
    # Instantiate model if needed
    if isinstance(model, str):
        model = AutoModelForSeq2SeqLM.from_pretrained(model)
    if task == "question-generation":
        if ans_model is None:
            # load the default answer-extraction model
            ans_model = targeted_task["default"]["ans_model"]
            ans_tokenizer = AutoTokenizer.from_pretrained(ans_model)
            ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model)
        else:
            # Try to infer the tokenizer from the model or config name (if provided as str)
            if ans_tokenizer is None:
                if isinstance(ans_model, str):
                    ans_tokenizer = ans_model
                else:
                    raise Exception(
                        "Impossible to guess which tokenizer to use. "
                        "Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer."
                    )
            # Instantiate tokenizer if needed
            if isinstance(ans_tokenizer, (str, tuple)):
                if isinstance(ans_tokenizer, tuple):
                    # For a tuple we have (tokenizer name, {kwargs})
                    ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer[0], **ans_tokenizer[1])
                else:
                    ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer)
            if isinstance(ans_model, str):
                ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model)
    if task == "e2e-qg":
        return task_class(model=model, tokenizer=tokenizer, use_cuda=use_cuda)
    elif task == "question-generation":
        return task_class(model=model, tokenizer=tokenizer, ans_model=ans_model, ans_tokenizer=ans_tokenizer, qg_format=qg_format, use_cuda=use_cuda)
    else:
        return task_class(model=model, tokenizer=tokenizer, ans_model=model, ans_tokenizer=tokenizer, qg_format=qg_format, use_cuda=use_cuda)
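A hedged usage sketch for the pipeline factory above, assuming the module is importable as pipelines; the first call downloads the default valhalla/t5-small-e2e-qg checkpoint, so it needs network access.

from pipelines import pipeline  # assumption: this file is on the import path

nlp = pipeline("e2e-qg")        # uses the default model from SUPPORTED_TASKS
questions = nlp("Python is a programming language created by Guido van Rossum.")
print(questions)                # list of generated question strings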
__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
aionNAS.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
logging.getLogger('tensorflow').disabled = True
import json
import sys
import os
import re
from collections import Counter
from numpy import mean
from numpy import std
import pandas as pd
import numpy as np
from pandas import read_csv
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.metrics import mean_squared_error, explained_variance_score, mean_absolute_error
from sklearn import metrics
from math import sqrt
from learner.machinelearning import machinelearning
from learner.aion_matrix import aion_matrix
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import autokeras as ak

class aionNAS:
    def __init__(self, nas_class, nas_params, xtrain1, xtest1, ytrain1, ytest1, deployLocation):
        try:
            self.dfFeatures = None
            self.nas_class = nas_class
            self.nas_params = nas_params
            self.targetFeature = None
            self.log = logging.getLogger('eion')
            self.n_models = int(self.nas_params['n_models'])
            self.n_epochs = int(self.nas_params['n_epochs'])
            self.optimizer = self.nas_params['optimizer']
            self.metrics = self.nas_params['metrics']
            self.tuner = self.nas_params['tuner']
            self.seed = int(self.nas_params['seed'])
            self.xtrain = xtrain1
            self.xtest = xtest1
            self.ytrain = ytrain1
            self.ytest = ytest1
            self.deployLocation = deployLocation
        except Exception as e:
            self.log.info('<!------------- NAS INIT Error ---------------> ')
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))

    def paramCheck(self):
        try:
            if not (self.nas_class):
                self.log.info('<!------------- NAS class input Error ---------------> ')
            if not (self.nas_params):
                self.log.info('<!------------- NAS model hyperparameter input Error ---------------> ')
            if not (self.targetFeature):
                self.log.info('<!------------- NAS model targetFeature input Error ---------------> ')
            if (self.n_models < 1):
                self.n_models = 1
            if not (self.dfFeatures):
                self.log.info('<!------------- NAS model features Error ---------------> ')
            if (self.n_epochs < 1):
                # Fixed: the original reset n_models here instead of n_epochs.
                self.n_epochs = 1
            if not (self.optimizer):
                self.optimizer = "adam"
            if not (self.tuner):
                self.tuner = "greedy"
            if (self.seed < 1):
                self.seed = 0
            if not (self.metrics):
                self.metrics = None
        except ValueError:
            self.log.info('<------------------ NAS config file error. --------------->')

    def recall_m(self, y_true, y_pred):
        # Note: the original called tf.keras.metrics.Sum here, which is a Metric
        # class, not a tensor reduction; tf.keras.backend.sum is the intended op.
        true_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1)))
        possible_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + tf.keras.backend.epsilon())
        return recall

    def precision_m(self, y_true, y_pred):
        true_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_true * y_pred, 0, 1)))
        predicted_positives = tf.keras.backend.sum(tf.keras.backend.round(tf.keras.backend.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + tf.keras.backend.epsilon())
        return precision

    def f1_score(self, y_true, y_pred):
        precision = self.precision_m(y_true, y_pred)
        recall = self.recall_m(y_true, y_pred)
        return 2 * ((precision * recall) / (precision + recall + tf.keras.backend.epsilon()))

    def nasStructdataPreprocess(self):
        df = self.data
        self.paramCheck()
        target = df[self.targetFeature].values
        counter = Counter(target)
        for k, v in counter.items():
            per = v / len(target) * 100
            self.log.info('autokeras struct Class=%d, Count=%d, Percentage=%.3f%%' % (k, v, per))
        # select columns with numerical data types
        num_ix = df.select_dtypes(include=['int64', 'float64']).columns
        subset = df[num_ix]
        last_ix = len(df.columns) - 1
        y = df[self.targetFeature]
        X = df.drop(self.targetFeature, axis=1)
        # select categorical features
        cat_ix = X.select_dtypes(include=['object', 'bool']).columns
        # one hot encode categorical features only
        ct = ColumnTransformer([('o', OneHotEncoder(), cat_ix)], remainder='passthrough')
        X = X.reset_index()
        X = X.replace(to_replace="NULL", value=0)
        X = X.dropna(how='any', axis=0)
        X = ct.fit_transform(X)
        from sklearn.preprocessing import scale
        X = scale(X)
        # label encode the target variable to have the classes 0 and 1
        y = LabelEncoder().fit_transform(y)
        # separate into train and test sets
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size, random_state=1)
        return X_train, X_test, y_train, y_test

    def nasStructClassification(self, scoreParam):
        try:
            objClf = aion_matrix()
            X_train, X_test, y_train, y_test = self.xtrain, self.xtest, self.ytrain, self.ytest
            modelName = "nas_structdata_classifier"
            self.log.info("Processing structured data block...\n")
            s_in = ak.StructuredDataInput()
            s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in)
            self.log.info("Data pipe via autokeras Classification Dense layers ...\n")
            s_out = ak.ClassificationHead()(s_out)
            self.log.info("applying autokeras automodel to run different neural models...\n")
            try:
                tuner = str(self.tuner).lower()
            except UnicodeEncodeError:
                tuner = (self.tuner.encode('utf8')).lower()
            nasclf = ak.AutoModel(inputs=s_in, outputs=s_out, overwrite=True, tuner=tuner, max_trials=self.n_models, seed=self.seed)
            nasclf.fit(X_train, y_train, epochs=self.n_epochs)
            best_model = nasclf.export_model()
            mpredict = best_model.predict(X_test)
            mtpredict = best_model.predict(X_train)
            y_pred_bool = np.argmax(mpredict, axis=1)
            y_train_pred_bool = np.argmax(mtpredict, axis=1)
            score = objClf.get_score(scoreParam, y_test, y_pred_bool)
            best_model_summary = best_model.summary()
            filename = os.path.join(self.deployLocation, 'log', 'summary.txt')
            with open(filename, 'w') as f:
                best_model.summary(print_fn=lambda x: f.write(x + '\n'))
            self.log.info("NAS struct data classification, best model summary: \n" + str(best_model.summary(print_fn=self.log.info)))
            loss, accuracy_m = nasclf.evaluate(X_test, y_test)
            self.log.info("Autokeras struct data classification metrics: \n")
        except Exception as inst:
            self.log.info("Error: NAS failed " + str(inst))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
            print(inst)
        return modelName, nasclf, score

    def nasStructRegressor(self, scoreParam):
        objClf = aion_matrix()
        modelName = "nas_struct_regressor"
        X_train, X_test, y_train, y_test = self.xtrain, self.xtest, self.ytrain, self.ytest
        # Autokeras structured-data regression graph
        s_in = ak.StructuredDataInput()
        s_out = ak.StructuredDataBlock(categorical_encoding=True)(s_in)
        self.log.info("Data pipe via autokeras Regression Dense layers ...\n")
        s_out = ak.RegressionHead(loss='mse', metrics=['mae'])(s_out)
        self.log.info("applying autokeras automodel to evaluate different neural models...\n")
        try:
            tuner = str(self.tuner).lower()
        except UnicodeEncodeError:
            tuner = (self.tuner.encode('utf8')).lower()
        nas_reg = ak.AutoModel(inputs=s_in, outputs=s_out, overwrite=True, tuner=tuner, max_trials=self.n_models)
        nas_reg.fit(X_train, y_train, epochs=self.n_epochs)
        best_model = nas_reg.export_model()
        self.log.info("NAS struct data regression best model summary: \n")
        best_model_summary = best_model.summary(print_fn=self.log.info)
        self.log.info(best_model_summary)
        predictm = best_model.predict(X_test)
        mtpredict = best_model.predict(X_train)
        score = objClf.get_score(scoreParam, y_test, predictm)
        self.log.info("Autokeras struct data regression metrics: \n")
        return modelName, nas_reg, score

    def nasMain(self, scoreParam):
        modelName = ""
        nasclf = None
        nas_reg = None
        mse_value = 0
        reg_rmse = 0
        mape_reg = 0
        huber_loss_reg = 0
        accuracy = 0
        precision = 0
        recall = 0
        # Dummy value returned for classification problems
        dummy_score_1 = int(0)
        try:
            if ((self.nas_class.lower() == "classification")):
                modelName, nasclf, score = self.nasStructClassification(scoreParam)
                self.log.info('NAS Struct Classification score: ' + str(score))
                best_model_nas = nasclf.export_model()
                scoredetails = '{"Model":"NAS","Score":' + str(round(score, 2)) + '}'
                return best_model_nas, self.nas_params, round(score, 2), 'NAS', -1, -1, -1
            elif (self.nas_class.lower() == "regression"):
                modelName, nas_reg, score = self.nasStructRegressor(scoreParam)
                self.log.info('NAS Struct Regression score: ' + str(score))
                best_model_nas = nas_reg.export_model()
                scoredetails = '{"Model":"NAS","Score":' + str(round(score, 2)) + '}'
                return best_model_nas, self.nas_params, score, 'NAS'
            else:
                pass
        except Exception as inst:
            print(inst)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
            output = {"status": "FAIL", "message": str(inst).strip('"')}
            output = json.dumps(output)
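A standalone sketch of the autokeras flow aionNAS wraps, run on synthetic data; the tiny trial and epoch counts are assumptions to keep the run short, not this module's defaults.

import numpy as np
import autokeras as ak
from sklearn.model_selection import train_test_split

X = np.random.rand(200, 8)
y = (X[:, 0] + X[:, 1] > 1.0).astype(int)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=1)

# StructuredDataClassifier is the high-level wrapper around the same
# StructuredDataInput/Block/ClassificationHead graph built above.
clf = ak.StructuredDataClassifier(max_trials=1, overwrite=True, seed=0)
clf.fit(X_tr, y_tr, epochs=2)
print("eval:", clf.evaluate(X_te, y_te))
best = clf.export_model()   # a plain tf.keras model, as in nasStructClassification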
pushrecords.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import socket import os import rsa from os.path import expanduser from pathlib import Path import requests import platform from appbe.dataPath import DATA_DIR import socket import getmac import subprocess import sys import json from datetime import datetime import binascii computername = socket.getfqdn() global_key = ''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAzJcxqRiUpp7CzViyqNlYaeyceDh5y6Ib4SoxoyNkN3+k0q+cr1lb k0KdWTtHIVqH1wsLYofYjpB7X2RN0KYTv8VfwmfQNrpFEbiRz4gcAeuxGCPgGaue N1ttujQMWHWCcY+UH5Voh8YUfkW8P+T3zxvr1d30D+kVBJC59y/31JvTzr3Bw/T+ NYv6xiienYiEYtm9d5ATioEwZOXaQBrtVvRmqcod5A1h4kn1ZauLX2Ph8H4TAuit NLtw6xUCJNumphP7xdU+ca6P6a6eaLprgKhvky+nz16u9/AC2AazRQHKWf8orS6b fw16JDCRs0zU4mTQLCjkUUt0edOaRhUtcQIDAQAB -----END RSA PUBLIC KEY----- ''' quarter_key = ''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAmKzOJxVEV9ulA+cjfxguAduLMD47OWjLcEAEmEuK8vR4O5f6e2h1 08NniGC+nkwqmM00U7JTVBkqnt9S/JgE3pAH2xwfWda2OvXNWisWmOQdqB0+XRHh NXsIG3yRk/sMlDpe7MJIyM5ADSu01PLn9FZTfmMq7lEp32tAf71cuUE/dwuWSvEQ WK2hn1L4D97O43XCd7FHtMSHfgtjdcCFgX9IRgWLKC8Bm3q5qcqF4v3cHuYTj3V9 njxPtRqPg6HJFiJrm9AX5bUEHAvbTcw4wAmsNTRQHPvVB+Lc+yGh5x8crhKjNB01 gdB5I3a4mPO7dKvadR6Mr28trr0Ff5t2HQIDAQAB -----END RSA PUBLIC KEY----- ''' halfYear_key=''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAgrGNwl8CNYQmVxi8/GEgPjfL5aEmyPkDyaJb9h4hZDSZCeeKd7Rv wwhuRTdBBfOp0bQ7QS7NYMg38Xlc3x85I9RnxdQdDKn2nRuvG0hG3wMBFy/DCSXF tXbDjJkLijAhqcBNu8m+a2Gtn14ShC7TbcfY4iVXho3WFUrn0xq6S5ducqWCsLJh R+TNImCaMICqfoAzEDGC3ojO5Hi3vJmmyK5CVp6bt4wLRATQjcp1ujGW4Uv4kEgp 7TR077c226v1KOdKdyZPHJzT1MKwZrG2Gdluk3/Y1apbwyGzYqFdTCOAB+mE73Dn wFXURgDJQmaU2oxxaA13WRcELpnirm+aIwIDAQAB -----END RSA PUBLIC KEY----- ''' oneYear_key=''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEA3GLqn+vkKn3fTNH3Bbb3Lq60pCoe+mn0KPz74Bp7p5OkZAUe14pP Tcf/UqdPwiENhSCseWtfZmfKDK8qYRHJ5xW02+AhHPPdiacS45X504/lGG3q/4SG ZgaFhMDvX+IH/ZH+qqbU3dRQhXJCCrAVAa7MonzM6yPiVeS2SdpMkNg1VDR1oTLB Pn+qSV6CnkK1cYtWCRQ23GH2Ru7fc09r7m8hVcifKJze84orpHC5FX0WScQuR8h/ fs1IbGkxTOxP8vplUj/cd4JjUxgd+w+8R4kcoPhdGZF5UGeZA8xMERzQLvh+4Ui0 KIvz5/iyKB/ozaeSG0OMwDAk3WDEnb1WqQIDAQAB -----END RSA PUBLIC KEY----- ''' full_key=''' -----BEGIN RSA PUBLIC KEY----- MIIBCgKCAQEAhqfNMuYYLdVrePhkO9rU/qT6FgolzI0YyzIJ2OeJE+++JioYm6nn ohQU32iiE0DZlCCLrHJXOOIAz2Op80goX0lxtngyxVUPsiB5CI77sAC7x6K3anJ0 elpnQCC0+xV2ZL5eIMNQHLe+X6wJl/HGWqkUlxKpWr4/kBEB4EisW60OePfhntIN 4OUJ7iEq+sDdOM5WazJIXeNV1cig4i6057GE3k5ITcQUmw17DZu2+dqkIscckaG+ t5SF7Qnvt4IY8IeQp2htx3yD+CJCV0u2uKwoSFMGJn3OWdaixC3+eojyMXmfAWtQ Ee9NLNNaTCMIvQ8BeItJLQs2Htw3bZNMvwIDAQAB -----END RSA PUBLIC KEY----- ''' def validate_key_Pair(privatepath,publickey): with open(privatepath, 'rb') as privatefile: keydata = privatefile.read() privatefile.close() try: privkey = rsa.PrivateKey.load_pkcs1(keydata,'PEM') data = 'Validate Global License' signature = rsa.sign(data.encode('utf-8'), privkey, 'SHA-1') pubkey = rsa.PublicKey.load_pkcs1(publickey) except: return False try: rsa.verify(data.encode('utf-8'), signature, pubkey) return True except Exception as e: return False def 
updateDRecord(licensepath): domain_license_path = os.path.join(DATA_DIR,'License','license_domain.lic') if(os.path.isfile(licensepath)): with open(licensepath, 'rb') as f: licensekey = f.read() f.close() with open(domain_license_path, 'wb') as f: f.write(licensekey) f.close() if(validate_key_Pair(domain_license_path,global_key)): return True,'Valid Domain License' else: return False,'Invalid Domain License' else: return False,'File Not Exists' def generateLicenseKey(userKey): record = {'UserKey':userKey} record = json.dumps(record) status = 'Error' url = 'https://qw7e33htlk.execute-api.ap-south-1.amazonaws.com/default/aion_license' try: response = requests.post(url, data=record,headers={"x-api-key":"3cQKRkKA4S57pYrkFp1Dd9jRXt4xnFoB9iqhAQRM","Content-Type":"application/json",}) if response.status_code == 200: outputStr=response.content outputStr = outputStr.decode('utf-8','ignore') outputStr = outputStr.strip() license_dict = json.loads(str(outputStr)) if license_dict['status'] == 'success': status = 'Success' licenseKey = license_dict['msg'] else: status = 'Error' licenseKey = '' else: status = 'Error' licenseKey = '' except Exception as inst: print(inst) status = 'Error' licenseKey = '' msg = {'status':status,'key':userKey,'licenseKey':licenseKey,'link':''} return msg def updateRecord(licensepath): currentDirectory = os.path.dirname(os.path.abspath(__file__)) license_path = os.path.join(currentDirectory,'..','lic','license.lic') if(os.path.isfile(licensepath)): with open(licensepath, 'rb') as f: licensekey = f.read() f.close() with open(license_path, 'wb') as f: f.write(licensekey) f.close() status,msg = check_domain_license() if status: status,msg = getdaysfromstartdate() if status: status,msg = check_days_license(int(msg)) return status,msg else: return False,'File Not Exists' def check_domain_license(): if 'CORP.HCL.IN' in computername: return True,'HCL Domain' else: return True,'HCL Domain' def diff_month(d1, d2): return (d1.year - d2.year) * 12 + d1.month - d2.month def getdaysfromstartdate(): currentDirectory = os.path.dirname(os.path.abspath(__file__)) startdatePath = os.path.join(currentDirectory,'..','lic','startdate.txt') if(os.path.isfile(startdatePath)): with open(startdatePath, "rb") as fl: encrypted_message = fl.read() fl.close() privkey = '''-----BEGIN RSA PRIVATE KEY----- MIIEqwIBAAKCAQEAm75ZwaepuxGJjU1Slk1+IUO2E49Hy8i9dym5FUaBRyTRH6R+ GTF1kcpd+1QinIZDMIdsmAc95Y8pTufxY30QxCkOhVASitSQWHS/IiWQHmsTJwdr 38lqZnQQloOt/iPlhcavbxu/yKFzwBmp+nM+ErDTnCBh6EGCGrw1xWF30T2IBpmp WwMEoqZsFV69RzwQAw39KG1KCxi5uscrB62YPgUdlT2b4Yaa90egQhGLLVdnKvhP ORiGT9omCH90Dkm1oMMQ0Y2JBLezgXa/bunSqtTBxEwzlwUAX2JJcanFYrzKy2OL xzwNRlWUXilZ4R/1RHAgUdNyKbYxZqc24MApoQIDAQABAoIBAQCHZ/i7gNz10qqH 2qkqGlfF7gvYd6MRTwdDGlhbYgA17ZGP9EDaAIFabtpFEAJDmgvCnotQpkMvWcet XcUmHW89TQDd8R8d6u9QqLggpQ3nFGsDbNViLMjAKLrfUb8tjOIZ7ANNE5ArjAuK AgYhxJ48O9bPD+xvtLwip95PHxMMz1CF0vxrpCinvPdeC3HzcnLNZWN3ustbph/4 Tx8mrKDpAVIHVYVbY4CMtm7NbIBYdyR9Lokc4zBg/OTuLo+0QRVJ3GHAN6cGxTwY vLwN9iBBHyn9WBp5NIOSoCdob7+ce8y+X8yHmVhwRCfcrYphzfFNfP7SPNzV1dLs dFybn/h9AoGJALCOC7ss+PBXy5WrWVNRPzFO7KrJDl5q7s/gMk0PkB4i4XOKHDTl MhHZXhxp84HwpphwNxPHvpFe3pVZwwoe8LH1neoodlLOF0Kuk3jENh6cMhKFvcZ+ gxaBxGSCOXF/U307mh0i4AafClhVjxtLgBW5iJSVA9Brc7ZqVwxlUP7aYGzReIE1 uEMCeQDh0vq8NteUlkM/wpNzrHHqgtEzePbTYa+QcTm4xhARHR/cO+E0/mZIfltw 3NVWCIalMia+aKnvRHqHy/cQfEo2Uv/h8oARWnbrvicMRTwYL0w2GrP0f+aG0RqQ msLMzS3kp6szhM7C99reFxdlxJoWBKkp94psOksCgYkApB01zGRudkK17EcdvjPc sMHzfoFryNpPaI23VChuR4UW2mZ797NAypSqRXE7OALxaOuOVuWqP8jW0C9i/Cps 
hI+SnZHFAw2tU3+hd3Wz9NouNUd6c2MwCSDQ5LikGttHSTa49/JuGdmGLTxCzRVu V0NiMPMfW4I2Sk8o4U3gbzWgwiYohLrhrwJ5ANun/7IB2lIykvk7B3g1nZzRYDIk EFpuI3ppWA8NwOUUoj/zksycQ9tx5Pn0JCMKKgYXsS322ozc3B6o3AoSC5GpzDH4 UnAOwavvC0ZZNeoEX6ok8TP7EL3EOYW8s4zIa0KFgPac0Q0+T4tFhMG9qW+PWwhy Oxeo3wKBiCQ8LEgmHnXZv3UZvwcikj6oCrPy8fnhp5RZl2DPPlaqf3vokE6W5oEo LIKcWKvth3EU7HRKwYgaznj/Mw55aETx31R0FiXMG266B4V7QWPF/KuaR0GBsYfu +edGXQCnLgooKlMtQLdL5mcLXHc9x/0Z0iYEejJtbjcGR87WylSNaCH3hH703iQ= -----END RSA PRIVATE KEY----- ''' privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM') decrypted_message = rsa.decrypt(encrypted_message, privkey) decrypted_message = decrypted_message.decode() import datetime start_time = datetime.datetime.strptime(decrypted_message, '%Y-%m-%d') current_date = datetime.datetime.today().strftime('%Y-%m-%d') current_date = datetime.datetime.strptime(current_date, '%Y-%m-%d') Months = diff_month(current_date,start_time) return True,Months else: return False,'Start Date Not Exists' def check_days_license(months): currentDirectory = os.path.dirname(os.path.abspath(__file__)) license_path = os.path.join(currentDirectory,'..','lic','license.lic') if(os.path.isfile(license_path)): if(validate_key_Pair(license_path,full_key)): return True,'Valid License' elif(validate_key_Pair(license_path,oneYear_key)): if months <= 12: return True,'Valid License' else: return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' elif(validate_key_Pair(license_path,halfYear_key)): if months <= 6: return True,'Valid License' else: return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' elif(validate_key_Pair(license_path,quarter_key)): if months <= 3: return True,'Valid License' else: return False,'License for AI.ON has expired. Please contact ERS Research for renewal.' else: return False,'Invalid License' else: return False,'License Not exists.Please contact ERS Research for renewal.' 
def checklicense():
    # Decrypt the stored license and verify product, hostname, machine uuid
    # and expiry date before declaring it valid.
    import binascii
    license_path = os.path.join(DATA_DIR,'License','license.lic')
    if os.path.isfile(license_path):
        try:
            with open(license_path, 'r') as privatefile:
                license_key = privatefile.read()
            encrypted_message = binascii.unhexlify(license_key.encode())
            privkey = '''-----BEGIN RSA PRIVATE KEY-----
MIIEqQIBAAKCAQEAhqfNMuYYLdVrePhkO9rU/qT6FgolzI0YyzIJ2OeJE+++JioY
m6nnohQU32iiE0DZlCCLrHJXOOIAz2Op80goX0lxtngyxVUPsiB5CI77sAC7x6K3
anJ0elpnQCC0+xV2ZL5eIMNQHLe+X6wJl/HGWqkUlxKpWr4/kBEB4EisW60OePfh
ntIN4OUJ7iEq+sDdOM5WazJIXeNV1cig4i6057GE3k5ITcQUmw17DZu2+dqkIscc
kaG+t5SF7Qnvt4IY8IeQp2htx3yD+CJCV0u2uKwoSFMGJn3OWdaixC3+eojyMXmf
AWtQEe9NLNNaTCMIvQ8BeItJLQs2Htw3bZNMvwIDAQABAoIBAGGmuRnrYaeDeWAO
CmqZxRMyQybOjyDrRgq9rAR/zJoHp8b3ikcBDTkuBQELWVZLFj7k50XU2cono9zC
cxI5xwVrNqrUOkV+7VYJVJzPTFkT/xnEt+zbOfstKmmIDpdzthtTLuHlomhhHA83
rPFi5a0Dpynz35suEnm6ONxx4ICONa3xkQ51ALm8EEsdJ+qRQhi2HLTF/OVZMxSa
A2DlFd4ChOEbYaN63xVCDxPXe9BfeHd/Rnim9x4xL9i2RL+mhARUy/ZP6LMHIPk7
NxTrGr4TuE/ETg8FZ3cywSnwsMlcplXo8Ar+5ths2XKxbmH1TI/vuQV1r7r0IeqV
F4W/xOkCgYkAiDQy7/WyJWuT+rQ+gOjSUumXgWE3HO+vJAsy05cTZFSs+nUE4ctn
FnvbBIRuClSr3zhcTtjaEaVnZ2OmGfOoAq0cvaXSlxqEs2456WQBf9oPHnvJEV07
AIqzo2EuDvGUh/bkFN3+djRRL9usNNplYA8jU3OQHGdeaS15ZikT+ZkQLXoHE0Oh
vQJ5AP0W9Qouvc9jXRhjNNOWmgt+JiHw/oQts/LUWJ2T4UJ7wKAqGwsmgf0NbF2p
aZ6AbMc7dHzCb52iLJRxlmlkJYzg449t0MgQVxTKQ5viIAdjkRBCIY2++GcYXb6k
6tUnF0Vm2kpffYUb5Lx5JoUE6IhMP0mEv3jKKwKBiCmvoC9lCUL+q+m9JKwbldOe
fqowcMfAa+AiNUohIORCLjbxfa8Fq+VrvtqhFXS/+WJ2Q3o2UHe6Ie24x+uFcVRw
Wy2IBO4ORbMM91iBLRxORvZTeHSCDj7aNKS6Z3hXY9hBLglc8DaJSJfXKdt7RC+k
MnGmGuM2l+Sk8FTeGaj4ucTRZjz1JBkCeQDhNSV1GyShv4xeoCCoy1FmOqmZ+EWy
vqxqv1PfXHDM5SwCGZWY9XokAGbWbWLjvOmO27QLNEV34pCCwxSR0aCsXI2B2rk2
3Xtvr5A7zRqtGIdEDWSoKjAGJSN9+mhQpglKI3zJQ3GBGdIPeEqzgSud5SNHu01a
IaMCgYgyoxtqdWi90iE75/x+uIVGJRdHtWoL2dr8Ixu1bOMjKCR8gjneSRTqI1tA
lbRH5K/jg6iccB/pQmBcIPIubF10Nv/ZQV760WK/h6ue2hOCaBLWT8EQEEfBfnp+
9rfBfNQIQIkBFTfGIHXUUPb9sJgDP1boUxcqxr9bpKUrs1EMkUd+PrvpHIj2
-----END RSA PRIVATE KEY-----
'''
            privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM')
            decrypted_message = rsa.decrypt(encrypted_message, privkey)
            msg = decrypted_message.decode().split('####')
            product = msg[0]
            computernameLicense = msg[1]
            computername = socket.getfqdn()
            licenseValid = False
            if product.lower() == 'aion':
                if computernameLicense == computername:
                    uuidlicense = msg[3]
                    uuid = guid()
                    if uuidlicense == uuid:
                        current_date = datetime.now()
                        license_expiry_date = msg[5]
                        license_expiry_date = datetime.strptime(license_expiry_date,'%Y-%m-%d %H:%M:%S')
                        if current_date > license_expiry_date:
                            return False,'License Expire'
                        else:
                            return True,''
            return False,'License Error'
        except Exception as e:
            print(e)
            return False,'License Error'
    else:
        return False,'Generate License'

def generate_record_key(product,version):
    # Collect machine identity (hostname, MAC, user, platform, uuid, date) and
    # encrypt it with the public key so it can be sent as a license request.
    computername = socket.getfqdn()
    macaddress = getmac.get_mac_address()
    license_date = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
    try:
        user = os.getlogin()
    except:
        user = 'NA'
    uuid = guid()
    msg = product+'###'+version+'###'+computername+'###'+macaddress+'###'+user+'###'+sys.platform+'###'+uuid+'###'+license_date
    pkeydata='''-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAm75ZwaepuxGJjU1Slk1+IUO2E49Hy8i9dym5FUaBRyTRH6R+GTF1
kcpd+1QinIZDMIdsmAc95Y8pTufxY30QxCkOhVASitSQWHS/IiWQHmsTJwdr38lq
ZnQQloOt/iPlhcavbxu/yKFzwBmp+nM+ErDTnCBh6EGCGrw1xWF30T2IBpmpWwME
oqZsFV69RzwQAw39KG1KCxi5uscrB62YPgUdlT2b4Yaa90egQhGLLVdnKvhPORiG
T9omCH90Dkm1oMMQ0Y2JBLezgXa/bunSqtTBxEwzlwUAX2JJcanFYrzKy2OLxzwN
RlWUXilZ4R/1RHAgUdNyKbYxZqc24MApoQIDAQAB
-----END RSA PUBLIC KEY-----
'''
    pubkey = rsa.PublicKey.load_pkcs1(pkeydata)
    encrypted_message = rsa.encrypt(msg.encode(), pubkey)
    encrypted_message = binascii.hexlify(encrypted_message).decode()
    return encrypted_message

def run(cmd):
    # Run a shell command and return its stripped stdout, or None on failure.
    try:
        return subprocess.run(cmd, shell=True, capture_output=True, check=True, encoding="utf-8").stdout.strip()
    except Exception as e:
        print(e)
        return None

def guid():
    # Return a stable machine identifier using the platform-appropriate source.
    if sys.platform == 'darwin':
        return run(
            "ioreg -d2 -c IOPlatformExpertDevice | awk -F\\\" '/IOPlatformUUID/{print $(NF-1)}'",
        )
    if sys.platform == 'win32' or sys.platform == 'cygwin' or sys.platform == 'msys':
        return run('wmic csproduct get uuid').split('\n')[2].strip()
    if sys.platform.startswith('linux'):
        return run('cat /var/lib/dbus/machine-id') or \
            run('cat /etc/machine-id')
    if sys.platform.startswith('openbsd') or sys.platform.startswith('freebsd'):
        return run('cat /etc/hostid') or \
            run('kenv -q smbios.system.uuid')

def updateLicense(licensekey):
    license_folder = os.path.join(DATA_DIR,'License')
    license_folder = Path(license_folder)
    license_folder.mkdir(parents=True, exist_ok=True)
    license_file = license_folder/'license.lic'
    with open(license_file, "w") as fl:
        fl.write(licensekey)

def enterRecord(version):
    # If no valid license is present, return a record key the user can submit
    # to obtain one.
    validLicense,msg = checklicense()
    if not validLicense:
        key = generate_record_key('AION',version)
        msg = {'status':msg,'key':key,'licenseKey':'','link':''}
    return validLicense,msg
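
# Minimal manual-test driver (an assumption, not part of the original flow):
# running this module directly checks the current license and, when none is
# found, prints the machine record key that would be sent to the license server.
if __name__ == '__main__':
    valid, info = checklicense()
    if valid:
        print('License OK')
    else:
        # '1.0' is a placeholder version string for illustration only.
        ok, payload = enterRecord('1.0')
        print(ok, payload)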