file
stringlengths 6
44
| content
stringlengths 38
162k
|
---|---|
reader.py | """This module contains the Reader class."""
from .builtin_datasets import BUILTIN_DATASETS
class Reader:
"""The Reader class is used to parse a file containing ratings.
Such a file is assumed to specify only one rating per line, and each line
needs to respect the following structure: ::
user ; item ; rating ; [timestamp]
where the order of the fields and the separator (here ';') may be
arbitrarily defined (see below). brackets indicate that the timestamp
field is optional.
For each built-in dataset, Surprise also provides predefined readers which
are useful if you want to use a custom dataset that has the same format as
a built-in one (see the ``name`` parameter).
Args:
name(:obj:`string`, optional): If specified, a Reader for one of the
built-in datasets is returned and any other parameter is ignored.
Accepted values are 'ml-100k', 'ml-1m', and 'jester'. Default
is ``None``.
line_format(:obj:`string`): The fields names, in the order at which
they are encountered on a line. Please note that ``line_format`` is
always space-separated (use the ``sep`` parameter). Default is
``'user item rating'``.
sep(char): the separator between fields. Example : ``';'``.
rating_scale(:obj:`tuple`, optional): The rating scale used for every
rating. Default is ``(1, 5)``.
skip_lines(:obj:`int`, optional): Number of lines to skip at the
beginning of the file. Default is ``0``.
"""
def __init__(
self,
name=None,
line_format="user item rating",
sep=None,
rating_scale=(1, 5),
skip_lines=0,
):
if name:
try:
self.__init__(**BUILTIN_DATASETS[name].reader_params)
except KeyError:
raise ValueError(
"unknown reader "
+ name
+ ". Accepted values are "
+ ", ".join(BUILTIN_DATASETS.keys())
+ "."
)
else:
self.sep = sep
self.skip_lines = skip_lines
self.rating_scale = rating_scale
lower_bound, higher_bound = rating_scale
splitted_format = line_format.split()
entities = ["user", "item", "rating"]
if "timestamp" in splitted_format:
self.with_timestamp = True
entities.append("timestamp")
else:
self.with_timestamp = False
# check that all fields are correct
if any(field not in entities for field in splitted_format):
raise ValueError("line_format parameter is incorrect.")
self.indexes = [splitted_format.index(entity) for entity in entities]
def parse_line(self, line):
"""Parse a line.
Ratings are translated so that they are all strictly positive.
Args:
line(str): The line to parse
Returns:
tuple: User id, item id, rating and timestamp. The timestamp is set
to ``None`` if it does no exist.
"""
line = line.split(self.sep)
try:
if self.with_timestamp:
uid, iid, r, timestamp = (line[i].strip() for i in self.indexes)
else:
uid, iid, r = (line[i].strip() for i in self.indexes)
timestamp = None
except IndexError:
raise ValueError(
"Impossible to parse line. Check the line_format" " and sep parameters."
)
return uid, iid, float(r), timestamp
|
__init__.py | from pkg_resources import get_distribution
from . import dump, model_selection
from .builtin_datasets import get_dataset_dir
from .dataset import Dataset
from .prediction_algorithms import (
AlgoBase,
BaselineOnly,
CoClustering,
KNNBaseline,
KNNBasic,
KNNWithMeans,
KNNWithZScore,
NMF,
NormalPredictor,
Prediction,
PredictionImpossible,
SlopeOne,
SVD,
SVDpp,
)
from .reader import Reader
from .trainset import Trainset
__all__ = [
"AlgoBase",
"NormalPredictor",
"BaselineOnly",
"KNNBasic",
"KNNWithMeans",
"KNNBaseline",
"SVD",
"SVDpp",
"NMF",
"SlopeOne",
"CoClustering",
"PredictionImpossible",
"Prediction",
"Dataset",
"Reader",
"Trainset",
"dump",
"KNNWithZScore",
"get_dataset_dir",
"model_selection",
]
__version__ = get_distribution("scikit-surprise").version
|
builtin_datasets.py | """This module contains built-in datasets that can be automatically
downloaded."""
import errno
import os
import zipfile
from collections import namedtuple
from os.path import join
from urllib.request import urlretrieve
def get_dataset_dir():
"""Return folder where downloaded datasets and other data are stored.
Default folder is ~/.surprise_data/, but it can also be set by the
environment variable ``SURPRISE_DATA_FOLDER``.
"""
folder = os.environ.get(
"SURPRISE_DATA_FOLDER", os.path.expanduser("~") + "/.surprise_data/"
)
try:
os.makedirs(folder)
except OSError as e:
if e.errno != errno.EEXIST:
# reraise exception if folder does not exist and creation failed.
raise
return folder
# a builtin dataset has
# - an url (where to download it)
# - a path (where it is located on the filesystem)
# - the parameters of the corresponding reader
BuiltinDataset = namedtuple("BuiltinDataset", ["url", "path", "reader_params"])
BUILTIN_DATASETS = {
"ml-100k": BuiltinDataset(
url="https://files.grouplens.org/datasets/movielens/ml-100k.zip",
path=join(get_dataset_dir(), "ml-100k/ml-100k/u.data"),
reader_params=dict(
line_format="user item rating timestamp", rating_scale=(1, 5), sep="\t"
),
),
"ml-1m": BuiltinDataset(
url="https://files.grouplens.org/datasets/movielens/ml-1m.zip",
path=join(get_dataset_dir(), "ml-1m/ml-1m/ratings.dat"),
reader_params=dict(
line_format="user item rating timestamp", rating_scale=(1, 5), sep="::"
),
),
"jester": BuiltinDataset(
url="https://eigentaste.berkeley.edu/dataset/archive/jester_dataset_2.zip",
path=join(get_dataset_dir(), "jester/jester_ratings.dat"),
reader_params=dict(line_format="user item rating", rating_scale=(-10, 10)),
),
}
def download_builtin_dataset(name):
dataset = BUILTIN_DATASETS[name]
print("Trying to download dataset from " + dataset.url + "...")
tmp_file_path = join(get_dataset_dir(), "tmp.zip")
urlretrieve(dataset.url, tmp_file_path)
with zipfile.ZipFile(tmp_file_path, "r") as tmp_zip:
tmp_zip.extractall(join(get_dataset_dir(), name))
os.remove(tmp_file_path)
print("Done! Dataset", name, "has been saved to", join(get_dataset_dir(), name))
|
__main__.py | #!/usr/bin/env python
import argparse
import os
import random as rd
import shutil
import sys
import numpy as np
import surprise.dataset as dataset
from surprise import __version__
from surprise.builtin_datasets import get_dataset_dir
from surprise.dataset import Dataset
from surprise.model_selection import cross_validate, KFold, PredefinedKFold
from surprise.prediction_algorithms import (
BaselineOnly,
CoClustering,
KNNBaseline,
KNNBasic,
KNNWithMeans,
NMF,
NormalPredictor,
SlopeOne,
SVD,
SVDpp,
)
from surprise.reader import Reader # noqa
def main():
class MyParser(argparse.ArgumentParser):
"""A parser which prints the help message when an error occurs. Taken from
https://stackoverflow.com/questions/4042452/display-help-message-with-python-argparse-when-script-is-called-without-any-argu.""" # noqa
def error(self, message):
sys.stderr.write("error: %s\n" % message)
self.print_help()
sys.exit(2)
parser = MyParser(
description="Evaluate the performance of a rating prediction "
+ "algorithm "
+ "on a given dataset using cross validation. You can use a built-in "
+ "or a custom dataset, and you can choose to automatically split the "
+ "dataset into folds, or manually specify train and test files. "
+ "Please refer to the documentation page "
+ "(https://surprise.readthedocs.io/) for more details.",
epilog="""Example:\n
surprise -algo SVD -params "{'n_epochs': 5, 'verbose': True}"
-load-builtin ml-100k -n-folds 3""",
)
algo_choices = {
"NormalPredictor": NormalPredictor,
"BaselineOnly": BaselineOnly,
"KNNBasic": KNNBasic,
"KNNBaseline": KNNBaseline,
"KNNWithMeans": KNNWithMeans,
"SVD": SVD,
"SVDpp": SVDpp,
"NMF": NMF,
"SlopeOne": SlopeOne,
"CoClustering": CoClustering,
}
parser.add_argument(
"-algo",
type=str,
choices=algo_choices,
help="The prediction algorithm to use. "
+ "Allowed values are "
+ ", ".join(algo_choices.keys())
+ ".",
metavar="<prediction algorithm>",
)
parser.add_argument(
"-params",
type=str,
metavar="<algorithm parameters>",
default="{}",
help="A kwargs dictionary that contains all the "
+ "algorithm parameters."
+ "Example: \"{'n_epochs': 10}\".",
)
parser.add_argument(
"-load-builtin",
type=str,
dest="load_builtin",
metavar="<dataset name>",
default="ml-100k",
help="The name of the built-in dataset to use."
+ "Allowed values are "
+ ", ".join(dataset.BUILTIN_DATASETS.keys())
+ ". Default is ml-100k.",
)
parser.add_argument(
"-load-custom",
type=str,
dest="load_custom",
metavar="<file path>",
default=None,
help="A file path to custom dataset to use. "
+ "Ignored if "
+ "-loadbuiltin is set. The -reader parameter needs "
+ "to be set.",
)
parser.add_argument(
"-folds-files",
type=str,
dest="folds_files",
metavar="<train1 test1 train2 test2... >",
default=None,
help="A list of custom train and test files. "
+ "Ignored if -load-builtin or -load-custom is set. "
"The -reader parameter needs to be set.",
)
parser.add_argument(
"-reader",
type=str,
metavar="<reader>",
default=None,
help="A Reader to read the custom dataset. Example: "
+ "\"Reader(line_format='user item rating timestamp',"
+ " sep='\\t')\"",
)
parser.add_argument(
"-n-folds",
type=int,
dest="n_folds",
metavar="<number of folds>",
default=5,
help="The number of folds for cross-validation. " + "Default is 5.",
)
parser.add_argument(
"-seed",
type=int,
metavar="<random seed>",
default=None,
help="The seed to use for RNG. " + "Default is the current system time.",
)
parser.add_argument(
"--with-dump",
dest="with_dump",
action="store_true",
help="Dump the algorithm "
+ "results in a file (one file per fold). "
+ "Default is False.",
)
parser.add_argument(
"-dump-dir",
dest="dump_dir",
type=str,
metavar="<dir>",
default=None,
help="Where to dump the files. Ignored if "
+ "with-dump is not set. Default is "
+ os.path.join(get_dataset_dir(), "dumps/"),
)
parser.add_argument(
"--clean",
dest="clean",
action="store_true",
help="Remove the " + get_dataset_dir() + " directory and exit.",
)
parser.add_argument("-v", "--version", action="version", version=__version__)
args = parser.parse_args()
if args.clean:
folder = get_dataset_dir()
shutil.rmtree(folder)
print("Removed", folder)
exit()
# setup RNG
rd.seed(args.seed)
np.random.seed(args.seed)
# setup algorithm
params = eval(args.params)
if args.algo is None:
parser.error("No algorithm was specified.")
algo = algo_choices[args.algo](**params)
# setup dataset
if args.load_custom is not None: # load custom and split
if args.reader is None:
parser.error("-reader parameter is needed.")
reader = eval(args.reader)
data = Dataset.load_from_file(args.load_custom, reader=reader)
cv = KFold(n_splits=args.n_folds, random_state=args.seed)
elif args.folds_files is not None: # load from files
if args.reader is None:
parser.error("-reader parameter is needed.")
reader = eval(args.reader)
folds_files = args.folds_files.split()
folds_files = [
(folds_files[i], folds_files[i + 1])
for i in range(0, len(folds_files) - 1, 2)
]
data = Dataset.load_from_folds(folds_files=folds_files, reader=reader)
cv = PredefinedKFold()
else: # load builtin dataset and split
data = Dataset.load_builtin(args.load_builtin)
cv = KFold(n_splits=args.n_folds, random_state=args.seed)
cross_validate(algo, data, cv=cv, verbose=True)
if __name__ == "__main__":
main()
|
trainset.py | """This module contains the Trainset class."""
import numpy as np
class Trainset:
"""A trainset contains all useful data that constitute a training set.
It is used by the :meth:`fit()
<surprise.prediction_algorithms.algo_base.AlgoBase.fit>` method of every
prediction algorithm. You should not try to build such an object on your
own but rather use the :meth:`Dataset.folds()
<surprise.dataset.Dataset.folds>` method or the
:meth:`DatasetAutoFolds.build_full_trainset()
<surprise.dataset.DatasetAutoFolds.build_full_trainset>` method.
Trainsets are different from :class:`Datasets <surprise.dataset.Dataset>`.
You can think of a :class:`Dataset <surprise.dataset.Dataset>` as the raw
data, and Trainsets as higher-level data where useful methods are defined.
Also, a :class:`Dataset <surprise.dataset.Dataset>` may be comprised of
multiple Trainsets (e.g. when doing cross validation).
Attributes:
ur(:obj:`defaultdict` of :obj:`list`): The users ratings. This is a
dictionary containing lists of tuples of the form ``(item_inner_id,
rating)``. The keys are user inner ids.
ir(:obj:`defaultdict` of :obj:`list`): The items ratings. This is a
dictionary containing lists of tuples of the form ``(user_inner_id,
rating)``. The keys are item inner ids.
n_users: Total number of users :math:`|U|`.
n_items: Total number of items :math:`|I|`.
n_ratings: Total number of ratings :math:`|R_{train}|`.
rating_scale(tuple): The minimum and maximal rating of the rating
scale.
global_mean: The mean of all ratings :math:`\\mu`.
"""
def __init__(
self,
ur,
ir,
n_users,
n_items,
n_ratings,
rating_scale,
raw2inner_id_users,
raw2inner_id_items,
):
self.ur = ur
self.ir = ir
self.n_users = n_users
self.n_items = n_items
self.n_ratings = n_ratings
self.rating_scale = rating_scale
self._raw2inner_id_users = raw2inner_id_users
self._raw2inner_id_items = raw2inner_id_items
self._global_mean = None
# inner2raw dicts could be built right now (or even before) but they
# are not always useful so we wait until we need them.
self._inner2raw_id_users = None
self._inner2raw_id_items = None
def knows_user(self, uid):
"""Indicate if the user is part of the trainset.
A user is part of the trainset if the user has at least one rating.
Args:
uid(int): The (inner) user id. See :ref:`this
note<raw_inner_note>`.
Returns:
``True`` if user is part of the trainset, else ``False``.
"""
return uid in self.ur
def knows_item(self, iid):
"""Indicate if the item is part of the trainset.
An item is part of the trainset if the item was rated at least once.
Args:
iid(int): The (inner) item id. See :ref:`this
note<raw_inner_note>`.
Returns:
``True`` if item is part of the trainset, else ``False``.
"""
return iid in self.ir
def to_inner_uid(self, ruid):
"""Convert a **user** raw id to an inner id.
See :ref:`this note<raw_inner_note>`.
Args:
ruid(str): The user raw id.
Returns:
int: The user inner id.
Raises:
ValueError: When user is not part of the trainset.
"""
try:
return self._raw2inner_id_users[ruid]
except KeyError:
raise ValueError("User " + str(ruid) + " is not part of the trainset.")
def to_raw_uid(self, iuid):
"""Convert a **user** inner id to a raw id.
See :ref:`this note<raw_inner_note>`.
Args:
iuid(int): The user inner id.
Returns:
str: The user raw id.
Raises:
ValueError: When ``iuid`` is not an inner id.
"""
if self._inner2raw_id_users is None:
self._inner2raw_id_users = {
inner: raw for (raw, inner) in self._raw2inner_id_users.items()
}
try:
return self._inner2raw_id_users[iuid]
except KeyError:
raise ValueError(str(iuid) + " is not a valid inner id.")
def to_inner_iid(self, riid):
"""Convert an **item** raw id to an inner id.
See :ref:`this note<raw_inner_note>`.
Args:
riid(str): The item raw id.
Returns:
int: The item inner id.
Raises:
ValueError: When item is not part of the trainset.
"""
try:
return self._raw2inner_id_items[riid]
except KeyError:
raise ValueError("Item " + str(riid) + " is not part of the trainset.")
def to_raw_iid(self, iiid):
"""Convert an **item** inner id to a raw id.
See :ref:`this note<raw_inner_note>`.
Args:
iiid(int): The item inner id.
Returns:
str: The item raw id.
Raises:
ValueError: When ``iiid`` is not an inner id.
"""
if self._inner2raw_id_items is None:
self._inner2raw_id_items = {
inner: raw for (raw, inner) in self._raw2inner_id_items.items()
}
try:
return self._inner2raw_id_items[iiid]
except KeyError:
raise ValueError(str(iiid) + " is not a valid inner id.")
def all_ratings(self):
"""Generator function to iterate over all ratings.
Yields:
A tuple ``(uid, iid, rating)`` where ids are inner ids (see
:ref:`this note <raw_inner_note>`).
"""
for u, u_ratings in self.ur.items():
for i, r in u_ratings:
yield u, i, r
def build_testset(self):
"""Return a list of ratings that can be used as a testset in the
:meth:`test() <surprise.prediction_algorithms.algo_base.AlgoBase.test>`
method.
The ratings are all the ratings that are in the trainset, i.e. all the
ratings returned by the :meth:`all_ratings()
<surprise.Trainset.all_ratings>` generator. This is useful in
cases where you want to to test your algorithm on the trainset.
"""
return [
(self.to_raw_uid(u), self.to_raw_iid(i), r)
for (u, i, r) in self.all_ratings()
]
def build_anti_testset(self, fill=None):
"""Return a list of ratings that can be used as a testset in the
:meth:`test() <surprise.prediction_algorithms.algo_base.AlgoBase.test>`
method.
The ratings are all the ratings that are **not** in the trainset, i.e.
all the ratings :math:`r_{ui}` where the user :math:`u` is known, the
item :math:`i` is known, but the rating :math:`r_{ui}` is not in the
trainset. As :math:`r_{ui}` is unknown, it is either replaced by the
:code:`fill` value or assumed to be equal to the mean of all ratings
:meth:`global_mean <surprise.Trainset.global_mean>`.
Args:
fill(float): The value to fill unknown ratings. If :code:`None` the
global mean of all ratings :meth:`global_mean
<surprise.Trainset.global_mean>` will be used.
Returns:
A list of tuples ``(uid, iid, fill)`` where ids are raw ids.
"""
fill = self.global_mean if fill is None else float(fill)
anti_testset = []
for u in self.all_users():
user_items = {j for (j, _) in self.ur[u]}
anti_testset += [
(self.to_raw_uid(u), self.to_raw_iid(i), fill)
for i in self.all_items()
if i not in user_items
]
return anti_testset
def all_users(self):
"""Generator function to iterate over all users.
Yields:
Inner id of users.
"""
return range(self.n_users)
def all_items(self):
"""Generator function to iterate over all items.
Yields:
Inner id of items.
"""
return range(self.n_items)
@property
def global_mean(self):
if self._global_mean is None:
self._global_mean = np.mean([r for (_, _, r) in self.all_ratings()])
return self._global_mean
|
utils.py | """The utils module contains the get_rng function."""
import numbers
import numpy as np
def get_rng(random_state):
"""Return a 'validated' RNG.
If random_state is None, use RandomState singleton from numpy. Else if
it's an integer, consider it's a seed and initialized an rng with that
seed. If it's already an rng, return it.
"""
if random_state is None:
return np.random.mtrand._rand
elif isinstance(random_state, (numbers.Integral, np.integer)):
return np.random.RandomState(random_state)
if isinstance(random_state, np.random.RandomState):
return random_state
raise ValueError(
"Wrong random state. Expecting None, an int or a numpy "
"RandomState instance, got a "
"{}".format(type(random_state))
)
|
search.py | from abc import ABC, abstractmethod
from itertools import product
import numpy as np
from joblib import delayed, Parallel
from ..dataset import DatasetUserFolds
from ..utils import get_rng
from .split import get_cv
from .validation import fit_and_score
class BaseSearchCV(ABC):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(
self,
algo_class,
measures=["rmse", "mae"],
cv=None,
refit=False,
return_train_measures=False,
n_jobs=1,
pre_dispatch="2*n_jobs",
joblib_verbose=0,
):
self.algo_class = algo_class
self.measures = [measure.lower() for measure in measures]
self.cv = cv
if isinstance(refit, str):
if refit.lower() not in self.measures:
raise ValueError(
"It looks like the measure you want to use "
"with refit ({}) is not in the measures "
"parameter"
)
self.refit = refit.lower()
elif refit is True:
self.refit = self.measures[0]
else:
self.refit = False
self.return_train_measures = return_train_measures
self.n_jobs = n_jobs
self.pre_dispatch = pre_dispatch
self.joblib_verbose = joblib_verbose
def _parse_options(self, params):
# As sim_options and bsl_options are dictionaries, they require a
# special treatment.
if "sim_options" in params:
sim_options = params["sim_options"]
sim_options_list = [
dict(zip(sim_options, v)) for v in product(*sim_options.values())
]
params["sim_options"] = sim_options_list
if "bsl_options" in params:
bsl_options = params["bsl_options"]
bsl_options_list = [
dict(zip(bsl_options, v)) for v in product(*bsl_options.values())
]
params["bsl_options"] = bsl_options_list
return params
def fit(self, data):
"""Runs the ``fit()`` method of the algorithm for all parameter
combinations, over different splits given by the ``cv`` parameter.
Args:
data (:obj:`Dataset <surprise.dataset.Dataset>`): The dataset on
which to evaluate the algorithm, in parallel.
"""
if self.refit and isinstance(data, DatasetUserFolds):
raise ValueError(
"refit cannot be used when data has been "
"loaded with load_from_folds()."
)
cv = get_cv(self.cv)
delayed_list = (
delayed(fit_and_score)(
self.algo_class(**params),
trainset,
testset,
self.measures,
self.return_train_measures,
)
for params, (trainset, testset) in product(
self.param_combinations, cv.split(data)
)
)
out = Parallel(
n_jobs=self.n_jobs,
pre_dispatch=self.pre_dispatch,
verbose=self.joblib_verbose,
)(delayed_list)
(test_measures_dicts, train_measures_dicts, fit_times, test_times) = zip(*out)
# test_measures_dicts is a list of dict like this:
# [{'mae': 1, 'rmse': 2}, {'mae': 2, 'rmse': 3} ...]
# E.g. for 5 splits, the first 5 dicts are for the first param
# combination, the next 5 dicts are for the second param combination,
# etc...
# We convert it into a dict of list:
# {'mae': [1, 2, ...], 'rmse': [2, 3, ...]}
# Each list is still of size n_parameters_combinations * n_splits.
# Then, reshape each list to have 2-D arrays of shape
# (n_parameters_combinations, n_splits). This way we can easily compute
# the mean and std dev over all splits or over all param comb.
test_measures = dict()
train_measures = dict()
new_shape = (len(self.param_combinations), cv.get_n_folds())
for m in self.measures:
test_measures[m] = np.asarray([d[m] for d in test_measures_dicts])
test_measures[m] = test_measures[m].reshape(new_shape)
if self.return_train_measures:
train_measures[m] = np.asarray([d[m] for d in train_measures_dicts])
train_measures[m] = train_measures[m].reshape(new_shape)
cv_results = dict()
best_index = dict()
best_params = dict()
best_score = dict()
best_estimator = dict()
for m in self.measures:
# cv_results: set measures for each split and each param comb
for split in range(cv.get_n_folds()):
cv_results[f"split{split}_test_{m}"] = test_measures[m][:, split]
if self.return_train_measures:
cv_results[f"split{split}_train_{m}"] = train_measures[m][:, split]
# cv_results: set mean and std over all splits (testset and
# trainset) for each param comb
mean_test_measures = test_measures[m].mean(axis=1)
cv_results[f"mean_test_{m}"] = mean_test_measures
cv_results[f"std_test_{m}"] = test_measures[m].std(axis=1)
if self.return_train_measures:
mean_train_measures = train_measures[m].mean(axis=1)
cv_results[f"mean_train_{m}"] = mean_train_measures
cv_results[f"std_train_{m}"] = train_measures[m].std(axis=1)
# cv_results: set rank of each param comb
# also set best_index, and best_xxxx attributes
indices = cv_results[f"mean_test_{m}"].argsort()
cv_results[f"rank_test_{m}"] = np.empty_like(indices)
if m in ("mae", "rmse", "mse"):
cv_results[f"rank_test_{m}"][indices] = (
np.arange(len(indices)) + 1
) # sklearn starts at 1 as well
best_index[m] = mean_test_measures.argmin()
elif m in ("fcp",):
cv_results[f"rank_test_{m}"][indices] = np.arange(len(indices), 0, -1)
best_index[m] = mean_test_measures.argmax()
best_params[m] = self.param_combinations[best_index[m]]
best_score[m] = mean_test_measures[best_index[m]]
best_estimator[m] = self.algo_class(**best_params[m])
# Cv results: set fit and train times (mean, std)
fit_times = np.array(fit_times).reshape(new_shape)
test_times = np.array(test_times).reshape(new_shape)
for s, times in zip(("fit", "test"), (fit_times, test_times)):
cv_results[f"mean_{s}_time"] = times.mean(axis=1)
cv_results[f"std_{s}_time"] = times.std(axis=1)
# cv_results: set params key and each param_* values
cv_results["params"] = self.param_combinations
for param in self.param_combinations[0]:
cv_results["param_" + param] = [
comb[param] for comb in self.param_combinations
]
if self.refit:
best_estimator[self.refit].fit(data.build_full_trainset())
self.best_index = best_index
self.best_params = best_params
self.best_score = best_score
self.best_estimator = best_estimator
self.cv_results = cv_results
def test(self, testset, verbose=False):
"""Call ``test()`` on the estimator with the best found parameters
(according the the ``refit`` parameter). See :meth:`AlgoBase.test()
<surprise.prediction_algorithms.algo_base.AlgoBase.test>`.
Only available if ``refit`` is not ``False``.
"""
if not self.refit:
raise ValueError("refit is False, cannot use test()")
return self.best_estimator[self.refit].test(testset, verbose)
def predict(self, *args):
"""Call ``predict()`` on the estimator with the best found parameters
(according the the ``refit`` parameter). See :meth:`AlgoBase.predict()
<surprise.prediction_algorithms.algo_base.AlgoBase.predict>`.
Only available if ``refit`` is not ``False``.
"""
if not self.refit:
raise ValueError("refit is False, cannot use predict()")
return self.best_estimator[self.refit].predict(*args)
class GridSearchCV(BaseSearchCV):
"""The :class:`GridSearchCV` class computes accuracy metrics for an
algorithm on various combinations of parameters, over a cross-validation
procedure. This is useful for finding the best set of parameters for a
prediction algorithm. It is analogous to `GridSearchCV
<https://scikit-learn.org/stable/modules/generated/sklearn.
model_selection.GridSearchCV.html>`_ from scikit-learn.
See an example in the :ref:`User Guide <tuning_algorithm_parameters>`.
Args:
algo_class(:obj:`AlgoBase \
<surprise.prediction_algorithms.algo_base.AlgoBase>`): The class
of the algorithm to evaluate.
param_grid(dict): Dictionary with algorithm parameters as keys and
list of values as keys. All combinations will be evaluated with
desired algorithm. Dict parameters such as ``sim_options`` require
special treatment, see :ref:`this note<grid_search_note>`.
measures(list of string): The performance measures to compute. Allowed
names are function names as defined in the :mod:`accuracy
<surprise.accuracy>` module. Default is ``['rmse', 'mae']``.
cv(cross-validation iterator, int or ``None``): Determines how the
``data`` parameter will be split (i.e. how trainsets and testsets
will be defined). If an int is passed, :class:`KFold
<surprise.model_selection.split.KFold>` is used with the
appropriate ``n_splits`` parameter. If ``None``, :class:`KFold
<surprise.model_selection.split.KFold>` is used with
``n_splits=5``.
refit(bool or str): If ``True``, refit the algorithm on the whole
dataset using the set of parameters that gave the best average
performance for the first measure of ``measures``. Other measures
can be used by passing a string (corresponding to the measure
name). Then, you can use the ``test()`` and ``predict()`` methods.
``refit`` can only be used if the ``data`` parameter given to
``fit()`` hasn't been loaded with :meth:`load_from_folds()
<surprise.dataset.Dataset.load_from_folds>`. Default is ``False``.
return_train_measures(bool): Whether to compute performance measures on
the trainsets. If ``True``, the ``cv_results`` attribute will
also contain measures for trainsets. Default is ``False``.
n_jobs(int): The maximum number of parallel training procedures.
- If ``-1``, all CPUs are used.
- If ``1`` is given, no parallel computing code is used at all,\
which is useful for debugging.
- For ``n_jobs`` below ``-1``, ``(n_cpus + n_jobs + 1)`` are\
used. For example, with ``n_jobs = -2`` all CPUs but one are\
used.
Default is ``1``.
pre_dispatch(int or string): Controls the number of jobs that get
dispatched during parallel execution. Reducing this number can be
useful to avoid an explosion of memory consumption when more jobs
get dispatched than CPUs can process. This parameter can be:
- ``None``, in which case all the jobs are immediately created\
and spawned. Use this for lightweight and fast-running\
jobs, to avoid delays due to on-demand spawning of the\
jobs.
- An int, giving the exact number of total jobs that are\
spawned.
- A string, giving an expression as a function of ``n_jobs``,\
as in ``'2*n_jobs'``.
Default is ``'2*n_jobs'``.
joblib_verbose(int): Controls the verbosity of joblib: the higher, the
more messages.
Attributes:
best_estimator (dict of AlgoBase):
Using an accuracy measure as key, get the algorithm that gave the
best accuracy results for the chosen measure, averaged over all
splits.
best_score (dict of floats):
Using an accuracy measure as key, get the best average score
achieved for that measure.
best_params (dict of dicts):
Using an accuracy measure as key, get the parameters combination
that gave the best accuracy results for the chosen measure (on
average).
best_index (dict of ints):
Using an accuracy measure as key, get the index that can be used
with ``cv_results`` that achieved the highest accuracy for that
measure (on average).
cv_results (dict of arrays):
A dict that contains accuracy measures over all splits, as well as
train and test time for each parameter combination. Can be imported
into a pandas `DataFrame` (see :ref:`example
<cv_results_example>`).
"""
def __init__(
self,
algo_class,
param_grid,
measures=["rmse", "mae"],
cv=None,
refit=False,
return_train_measures=False,
n_jobs=1,
pre_dispatch="2*n_jobs",
joblib_verbose=0,
):
super().__init__(
algo_class=algo_class,
measures=measures,
cv=cv,
refit=refit,
return_train_measures=return_train_measures,
n_jobs=n_jobs,
pre_dispatch=pre_dispatch,
joblib_verbose=joblib_verbose,
)
self.param_grid = self._parse_options(param_grid.copy())
self.param_combinations = [
dict(zip(self.param_grid, v)) for v in product(*self.param_grid.values())
]
class RandomizedSearchCV(BaseSearchCV):
"""The :class:`RandomizedSearchCV` class computes accuracy metrics for an
algorithm on various combinations of parameters, over a cross-validation
procedure. As opposed to GridSearchCV, which uses an exhaustive
combinatorial approach, RandomizedSearchCV samples randomly from the
parameter space. This is useful for finding the best set of parameters
for a prediction algorithm, especially using a coarse to fine approach.
It is analogous to `RandomizedSearchCV <https://scikit-learn.org/stable/
modules/generated/sklearn.model_selection.RandomizedSearchCV.html>`_ from
scikit-learn.
See an example in the :ref:`User Guide <tuning_algorithm_parameters>`.
Args:
algo_class(:obj:`AlgoBase \
<surprise.prediction_algorithms.algo_base.AlgoBase>`): The class
of the algorithm to evaluate.
param_distributions(dict): Dictionary with algorithm parameters as
keys and distributions or lists of parameters to try. Distributions
must provide a rvs method for sampling (such as those from
scipy.stats.distributions). If a list is given, it is sampled
uniformly. Parameters will be sampled n_iter times.
n_iter(int): Number of times parameter settings are sampled. Default is
``10``.
measures(list of string): The performance measures to compute. Allowed
names are function names as defined in the :mod:`accuracy
<surprise.accuracy>` module. Default is ``['rmse', 'mae']``.
cv(cross-validation iterator, int or ``None``): Determines how the
``data`` parameter will be split (i.e. how trainsets and testsets
will be defined). If an int is passed, :class:`KFold
<surprise.model_selection.split.KFold>` is used with the
appropriate ``n_splits`` parameter. If ``None``, :class:`KFold
<surprise.model_selection.split.KFold>` is used with
``n_splits=5``.
refit(bool or str): If ``True``, refit the algorithm on the whole
dataset using the set of parameters that gave the best average
performance for the first measure of ``measures``. Other measures
can be used by passing a string (corresponding to the measure
name). Then, you can use the ``test()`` and ``predict()`` methods.
``refit`` can only be used if the ``data`` parameter given to
``fit()`` hasn't been loaded with :meth:`load_from_folds()
<surprise.dataset.Dataset.load_from_folds>`. Default is ``False``.
return_train_measures(bool): Whether to compute performance measures on
the trainsets. If ``True``, the ``cv_results`` attribute will
also contain measures for trainsets. Default is ``False``.
n_jobs(int): The maximum number of parallel training procedures.
- If ``-1``, all CPUs are used.
- If ``1`` is given, no parallel computing code is used at all,\
which is useful for debugging.
- For ``n_jobs`` below ``-1``, ``(n_cpus + n_jobs + 1)`` are\
used. For example, with ``n_jobs = -2`` all CPUs but one are\
used.
Default is ``1``.
pre_dispatch(int or string): Controls the number of jobs that get
dispatched during parallel execution. Reducing this number can be
useful to avoid an explosion of memory consumption when more jobs
get dispatched than CPUs can process. This parameter can be:
- ``None``, in which case all the jobs are immediately created\
and spawned. Use this for lightweight and fast-running\
jobs, to avoid delays due to on-demand spawning of the\
jobs.
- An int, giving the exact number of total jobs that are\
spawned.
- A string, giving an expression as a function of ``n_jobs``,\
as in ``'2*n_jobs'``.
Default is ``'2*n_jobs'``.
random_state(int, RandomState or None): Pseudo random number
generator seed used for random uniform sampling from lists of
possible values instead of scipy.stats distributions. If int,
``random_state`` is the seed used by the random number generator.
If ``RandomState`` instance, ``random_state`` is the random number
generator. If ``None``, the random number generator is the
RandomState instance used by ``np.random``. Default is ``None``.
joblib_verbose(int): Controls the verbosity of joblib: the higher, the
more messages.
Attributes:
best_estimator (dict of AlgoBase):
Using an accuracy measure as key, get the algorithm that gave the
best accuracy results for the chosen measure, averaged over all
splits.
best_score (dict of floats):
Using an accuracy measure as key, get the best average score
achieved for that measure.
best_params (dict of dicts):
Using an accuracy measure as key, get the parameters combination
that gave the best accuracy results for the chosen measure (on
average).
best_index (dict of ints):
Using an accuracy measure as key, get the index that can be used
with ``cv_results`` that achieved the highest accuracy for that
measure (on average).
cv_results (dict of arrays):
A dict that contains accuracy measures over all splits, as well as
train and test time for each parameter combination. Can be imported
into a pandas `DataFrame` (see :ref:`example
<cv_results_example>`).
"""
def __init__(
self,
algo_class,
param_distributions,
n_iter=10,
measures=["rmse", "mae"],
cv=None,
refit=False,
return_train_measures=False,
n_jobs=1,
pre_dispatch="2*n_jobs",
random_state=None,
joblib_verbose=0,
):
super().__init__(
algo_class=algo_class,
measures=measures,
cv=cv,
refit=refit,
return_train_measures=return_train_measures,
n_jobs=n_jobs,
pre_dispatch=pre_dispatch,
joblib_verbose=joblib_verbose,
)
self.n_iter = n_iter
self.random_state = random_state
self.param_distributions = self._parse_options(param_distributions.copy())
self.param_combinations = self._sample_parameters(
self.param_distributions, self.n_iter, self.random_state
)
@staticmethod
def _sample_parameters(param_distributions, n_iter, random_state=None):
"""Samples ``n_iter`` parameter combinations from
``param_distributions`` using ``random_state`` as a seed.
Non-deterministic iterable over random candidate combinations for
hyper-parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not
accept a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used
to define the parameter search space. Deterministic behavior is however
guaranteed from SciPy 0.16 onwards.
Args:
param_distributions(dict): Dictionary where the keys are
parameters and values are distributions from which a parameter
is to be sampled. Distributions either have to provide a
``rvs`` function to sample from them, or can be given as a list
of values, where a uniform distribution is assumed.
n_iter(int): Number of parameter settings produced.
Default is ``10``.
random_state(int, RandomState instance or None):
Pseudo random number generator seed used for random uniform
sampling from lists of possible values instead of scipy.stats
distributions. If ``None``, the random number generator is the
random state instance used by np.random. Default is ``None``.
Returns:
combos(list): List of parameter dictionaries with sampled values.
"""
# check if all distributions are given as lists
# if so, sample without replacement
all_lists = np.all(
[not hasattr(v, "rvs") for v in param_distributions.values()]
)
rnd = get_rng(random_state)
# sort for reproducibility
items = sorted(param_distributions.items())
if all_lists:
# create exhaustive combinations
param_grid = [
dict(zip(param_distributions, v))
for v in product(*param_distributions.values())
]
combos = np.random.choice(param_grid, n_iter, replace=False)
else:
combos = []
for _ in range(n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs(random_state=rnd)
else:
params[k] = v[rnd.randint(len(v))]
combos.append(params)
return combos
|
__init__.py | from .search import GridSearchCV, RandomizedSearchCV
from .split import (
KFold,
LeaveOneOut,
PredefinedKFold,
RepeatedKFold,
ShuffleSplit,
train_test_split,
)
from .validation import cross_validate
__all__ = [
"KFold",
"ShuffleSplit",
"train_test_split",
"RepeatedKFold",
"LeaveOneOut",
"PredefinedKFold",
"cross_validate",
"GridSearchCV",
"RandomizedSearchCV",
]
|
validation.py | """
The validation module contains the cross_validate function, inspired from
the mighty scikit learn.
"""
import time
import numpy as np
from joblib import delayed, Parallel
from .. import accuracy
from .split import get_cv
def cross_validate(
algo,
data,
measures=["rmse", "mae"],
cv=None,
return_train_measures=False,
n_jobs=1,
pre_dispatch="2*n_jobs",
verbose=False,
):
"""
Run a cross validation procedure for a given algorithm, reporting accuracy
measures and computation times.
See an example in the :ref:`User Guide <cross_validate_example>`.
Args:
algo(:obj:`AlgoBase \
<surprise.prediction_algorithms.algo_base.AlgoBase>`):
The algorithm to evaluate.
data(:obj:`Dataset <surprise.dataset.Dataset>`): The dataset on which
to evaluate the algorithm.
measures(list of string): The performance measures to compute. Allowed
names are function names as defined in the :mod:`accuracy
<surprise.accuracy>` module. Default is ``['rmse', 'mae']``.
cv(cross-validation iterator, int or ``None``): Determines how the
``data`` parameter will be split (i.e. how trainsets and testsets
will be defined). If an int is passed, :class:`KFold
<surprise.model_selection.split.KFold>` is used with the
appropriate ``n_splits`` parameter. If ``None``, :class:`KFold
<surprise.model_selection.split.KFold>` is used with
``n_splits=5``.
return_train_measures(bool): Whether to compute performance measures on
the trainsets. Default is ``False``.
n_jobs(int): The maximum number of folds evaluated in parallel.
- If ``-1``, all CPUs are used.
- If ``1`` is given, no parallel computing code is used at all,\
which is useful for debugging.
- For ``n_jobs`` below ``-1``, ``(n_cpus + n_jobs + 1)`` are\
used. For example, with ``n_jobs = -2`` all CPUs but one are\
used.
Default is ``1``.
pre_dispatch(int or string): Controls the number of jobs that get
dispatched during parallel execution. Reducing this number can be
useful to avoid an explosion of memory consumption when more jobs
get dispatched than CPUs can process. This parameter can be:
- ``None``, in which case all the jobs are immediately created\
and spawned. Use this for lightweight and fast-running\
jobs, to avoid delays due to on-demand spawning of the\
jobs.
- An int, giving the exact number of total jobs that are\
spawned.
- A string, giving an expression as a function of ``n_jobs``,\
as in ``'2*n_jobs'``.
Default is ``'2*n_jobs'``.
verbose(int): If ``True`` accuracy measures for each split are printed,
as well as train and test times. Averages and standard deviations
over all splits are also reported. Default is ``False``: nothing is
printed.
Returns:
dict: A dict with the following keys:
- ``'test_*'`` where ``*`` corresponds to a lower-case accuracy
measure, e.g. ``'test_rmse'``: numpy array with accuracy values
for each testset.
- ``'train_*'`` where ``*`` corresponds to a lower-case accuracy
measure, e.g. ``'train_rmse'``: numpy array with accuracy values
for each trainset. Only available if ``return_train_measures`` is
``True``.
- ``'fit_time'``: numpy array with the training time in seconds for
each split.
- ``'test_time'``: numpy array with the testing time in seconds for
each split.
"""
measures = [m.lower() for m in measures]
cv = get_cv(cv)
delayed_list = (
delayed(fit_and_score)(algo, trainset, testset, measures, return_train_measures)
for (trainset, testset) in cv.split(data)
)
out = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch)(delayed_list)
(test_measures_dicts, train_measures_dicts, fit_times, test_times) = zip(*out)
test_measures = dict()
train_measures = dict()
ret = dict()
for m in measures:
# transform list of dicts into dict of lists
# Same as in GridSearchCV.fit()
test_measures[m] = np.asarray([d[m] for d in test_measures_dicts])
ret["test_" + m] = test_measures[m]
if return_train_measures:
train_measures[m] = np.asarray([d[m] for d in train_measures_dicts])
ret["train_" + m] = train_measures[m]
ret["fit_time"] = fit_times
ret["test_time"] = test_times
if verbose:
print_summary(
algo,
measures,
test_measures,
train_measures,
fit_times,
test_times,
cv.n_splits,
)
return ret
def fit_and_score(algo, trainset, testset, measures, return_train_measures=False):
"""Helper method that trains an algorithm and compute accuracy measures on
a testset. Also report train and test times.
Args:
algo(:obj:`AlgoBase \
<surprise.prediction_algorithms.algo_base.AlgoBase>`):
The algorithm to use.
trainset(:obj:`Trainset <surprise.trainset.Trainset>`): The trainset.
testset(:obj:`testset`): The testset.
measures(list of string): The performance measures to compute. Allowed
names are function names as defined in the :mod:`accuracy
<surprise.accuracy>` module.
return_train_measures(bool): Whether to compute performance measures on
the trainset. Default is ``False``.
Returns:
tuple: A tuple containing:
- A dictionary mapping each accuracy metric to its value on the
testset (keys are lower case).
- A dictionary mapping each accuracy metric to its value on the
trainset (keys are lower case). This dict is empty if
return_train_measures is False.
- The fit time in seconds.
- The testing time in seconds.
"""
start_fit = time.time()
algo.fit(trainset)
fit_time = time.time() - start_fit
start_test = time.time()
predictions = algo.test(testset)
test_time = time.time() - start_test
if return_train_measures:
train_predictions = algo.test(trainset.build_testset())
test_measures = dict()
train_measures = dict()
for m in measures:
f = getattr(accuracy, m.lower())
test_measures[m] = f(predictions, verbose=0)
if return_train_measures:
train_measures[m] = f(train_predictions, verbose=0)
return test_measures, train_measures, fit_time, test_time
def print_summary(
algo, measures, test_measures, train_measures, fit_times, test_times, n_splits
):
"""Helper for printing the result of cross_validate."""
print(
"Evaluating {} of algorithm {} on {} split(s).".format(
", ".join(m.upper() for m in measures), algo.__class__.__name__, n_splits
)
)
print()
row_format = "{:<18}" + "{:<8}" * (n_splits + 2)
s = row_format.format(
"", *[f"Fold {i + 1}" for i in range(n_splits)] + ["Mean"] + ["Std"]
)
s += "\n"
s += "\n".join(
row_format.format(
key.upper() + " (testset)",
*[f"{v:1.4f}" for v in vals]
+ [f"{np.mean(vals):1.4f}"]
+ [f"{np.std(vals):1.4f}"],
)
for (key, vals) in test_measures.items()
)
if train_measures:
s += "\n"
s += "\n".join(
row_format.format(
key.upper() + " (trainset)",
*[f"{v:1.4f}" for v in vals]
+ [f"{np.mean(vals):1.4f}"]
+ [f"{np.std(vals):1.4f}"],
)
for (key, vals) in train_measures.items()
)
s += "\n"
s += row_format.format(
"Fit time",
*[f"{t:.2f}" for t in fit_times]
+ [f"{np.mean(fit_times):.2f}"]
+ [f"{np.std(fit_times):.2f}"],
)
s += "\n"
s += row_format.format(
"Test time",
*[f"{t:.2f}" for t in test_times]
+ [f"{np.mean(test_times):.2f}"]
+ [f"{np.std(test_times):.2f}"],
)
print(s)
|
split.py | """
The :mod:`model_selection.split<surprise.model_selection.split>` module
contains various cross-validation iterators. Design and tools are inspired from
the mighty scikit learn.
The available iterators are:
.. autosummary::
:nosignatures:
KFold
RepeatedKFold
ShuffleSplit
LeaveOneOut
PredefinedKFold
This module also contains a function for splitting datasets into trainset and
testset:
.. autosummary::
:nosignatures:
train_test_split
"""
import numbers
from collections import defaultdict
from itertools import chain
from math import ceil, floor
import numpy as np
from ..utils import get_rng
def get_cv(cv):
"""Return a 'validated' CV iterator."""
if cv is None:
return KFold(n_splits=5)
if isinstance(cv, numbers.Integral):
return KFold(n_splits=cv)
if hasattr(cv, "split") and not isinstance(cv, str):
return cv # str have split
raise ValueError(
"Wrong CV object. Expecting None, an int or CV iterator, "
"got a {}".format(type(cv))
)
class KFold:
"""A basic cross-validation iterator.
Each fold is used once as a testset while the k - 1 remaining folds are
used for training.
See an example in the :ref:`User Guide <use_cross_validation_iterators>`.
Args:
n_splits(int): The number of folds.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. ``random_state`` is
only used if ``shuffle`` is ``True``. Default is ``None``.
shuffle(bool): Whether to shuffle the ratings in the ``data`` parameter
of the ``split()`` method. Shuffling is not done in-place. Default
is ``True``.
"""
def __init__(self, n_splits=5, random_state=None, shuffle=True):
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
if self.n_splits > len(data.raw_ratings) or self.n_splits < 2:
raise ValueError(
"Incorrect value for n_splits={}. "
"Must be >=2 and less than the number "
"of ratings".format(len(data.raw_ratings))
)
# We use indices to avoid shuffling the original data.raw_ratings list.
indices = np.arange(len(data.raw_ratings))
if self.shuffle:
get_rng(self.random_state).shuffle(indices)
start, stop = 0, 0
for fold_i in range(self.n_splits):
start = stop
stop += len(indices) // self.n_splits
if fold_i < len(indices) % self.n_splits:
stop += 1
raw_trainset = [
data.raw_ratings[i] for i in chain(indices[:start], indices[stop:])
]
raw_testset = [data.raw_ratings[i] for i in indices[start:stop]]
trainset = data.construct_trainset(raw_trainset)
testset = data.construct_testset(raw_testset)
yield trainset, testset
def get_n_folds(self):
return self.n_splits
class RepeatedKFold:
"""
Repeated :class:`KFold` cross validator.
Repeats :class:`KFold` n times with different randomization in each
repetition.
See an example in the :ref:`User Guide <use_cross_validation_iterators>`.
Args:
n_splits(int): The number of folds.
n_repeats(int): The number of repetitions.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. ``random_state`` is
only used if ``shuffle`` is ``True``. Default is ``None``.
shuffle(bool): Whether to shuffle the ratings in the ``data`` parameter
of the ``split()`` method. Shuffling is not done in-place. Default
is ``True``.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
self.n_repeats = n_repeats
self.random_state = random_state
self.n_splits = n_splits
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
rng = get_rng(self.random_state)
for _ in range(self.n_repeats):
cv = KFold(n_splits=self.n_splits, random_state=rng, shuffle=True)
yield from cv.split(data)
def get_n_folds(self):
return self.n_repeats * self.n_splits
class ShuffleSplit:
"""A basic cross-validation iterator with random trainsets and testsets.
Contrary to other cross-validation strategies, random splits do not
guarantee that all folds will be different, although this is still very
likely for sizeable datasets.
See an example in the :ref:`User Guide <use_cross_validation_iterators>`.
Args:
n_splits(int): The number of folds.
test_size(float or int ``None``): If float, it represents the
proportion of ratings to include in the testset. If int,
represents the absolute number of ratings in the testset. If
``None``, the value is set to the complement of the trainset size.
Default is ``.2``.
train_size(float or int or ``None``): If float, it represents the
proportion of ratings to include in the trainset. If int,
represents the absolute number of ratings in the trainset. If
``None``, the value is set to the complement of the testset size.
Default is ``None``.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. ``random_state`` is
only used if ``shuffle`` is ``True``. Default is ``None``.
shuffle(bool): Whether to shuffle the ratings in the ``data`` parameter
of the ``split()`` method. Shuffling is not done in-place. Setting
this to `False` defeats the purpose of this iterator, but it's
useful for the implementation of :func:`train_test_split`. Default
is ``True``.
"""
def __init__(
self,
n_splits=5,
test_size=0.2,
train_size=None,
random_state=None,
shuffle=True,
):
if n_splits <= 0:
raise ValueError(
"n_splits = {} should be strictly greater than " "0.".format(n_splits)
)
if test_size is not None and test_size <= 0:
raise ValueError(
"test_size={} should be strictly greater than " "0".format(test_size)
)
if train_size is not None and train_size <= 0:
raise ValueError(
"train_size={} should be strictly greater than " "0".format(train_size)
)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.shuffle = shuffle
def validate_train_test_sizes(self, test_size, train_size, n_ratings):
if test_size is not None and test_size >= n_ratings:
raise ValueError(
"test_size={} should be less than the number of "
"ratings {}".format(test_size, n_ratings)
)
if train_size is not None and train_size >= n_ratings:
raise ValueError(
"train_size={} should be less than the number of"
" ratings {}".format(train_size, n_ratings)
)
if np.asarray(test_size).dtype.kind == "f":
test_size = ceil(test_size * n_ratings)
if train_size is None:
train_size = n_ratings - test_size
elif np.asarray(train_size).dtype.kind == "f":
train_size = floor(train_size * n_ratings)
if test_size is None:
test_size = n_ratings - train_size
if train_size + test_size > n_ratings:
raise ValueError(
"The sum of train_size and test_size ({}) "
"should be smaller than the number of "
"ratings {}.".format(train_size + test_size, n_ratings)
)
return int(train_size), int(test_size)
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
test_size, train_size = self.validate_train_test_sizes(
self.test_size, self.train_size, len(data.raw_ratings)
)
rng = get_rng(self.random_state)
for _ in range(self.n_splits):
if self.shuffle:
permutation = rng.permutation(len(data.raw_ratings))
else:
permutation = np.arange(len(data.raw_ratings))
raw_trainset = [data.raw_ratings[i] for i in permutation[:test_size]]
raw_testset = [
data.raw_ratings[i]
for i in permutation[test_size : (test_size + train_size)]
]
trainset = data.construct_trainset(raw_trainset)
testset = data.construct_testset(raw_testset)
yield trainset, testset
def get_n_folds(self):
return self.n_splits
def train_test_split(
data, test_size=0.2, train_size=None, random_state=None, shuffle=True
):
"""Split a dataset into trainset and testset.
See an example in the :ref:`User Guide <train_test_split_example>`.
Note: this function cannot be used as a cross-validation iterator.
Args:
data(:obj:`Dataset <surprise.dataset.Dataset>`): The dataset to split
into trainset and testset.
test_size(float or int ``None``): If float, it represents the
proportion of ratings to include in the testset. If int,
represents the absolute number of ratings in the testset. If
``None``, the value is set to the complement of the trainset size.
Default is ``.2``.
train_size(float or int or ``None``): If float, it represents the
proportion of ratings to include in the trainset. If int,
represents the absolute number of ratings in the trainset. If
``None``, the value is set to the complement of the testset size.
Default is ``None``.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. ``random_state`` is
only used if ``shuffle`` is ``True``. Default is ``None``.
shuffle(bool): Whether to shuffle the ratings in the ``data``
parameter. Shuffling is not done in-place. Default is ``True``.
"""
ss = ShuffleSplit(
n_splits=1,
test_size=test_size,
train_size=train_size,
random_state=random_state,
shuffle=shuffle,
)
return next(ss.split(data))
class LeaveOneOut:
"""Cross-validation iterator where each user has exactly one rating in the
testset.
Contrary to other cross-validation strategies, ``LeaveOneOut`` does not
guarantee that all folds will be different, although this is still very
likely for sizeable datasets.
See an example in the :ref:`User Guide <use_cross_validation_iterators>`.
Args:
n_splits(int): The number of folds.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. ``random_state`` is
only used if ``shuffle`` is ``True``. Default is ``None``.
min_n_ratings(int): Minimum number of ratings for each user in the
trainset. E.g. if ``min_n_ratings`` is ``2``, we are sure each user
has at least ``2`` ratings in the trainset (and ``1`` in the
testset). Other users are discarded. Default is ``0``, so some
users (having only one rating) may be in the testset and not in the
trainset.
"""
def __init__(self, n_splits=5, random_state=None, min_n_ratings=0):
self.n_splits = n_splits
self.random_state = random_state
self.min_n_ratings = min_n_ratings
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
# map ratings to the users ids
user_ratings = defaultdict(list)
for uid, iid, r_ui, _ in data.raw_ratings:
user_ratings[uid].append((uid, iid, r_ui, None))
rng = get_rng(self.random_state)
for _ in range(self.n_splits):
# for each user, randomly choose a rating and put it in the
# testset.
raw_trainset, raw_testset = [], []
for uid, ratings in user_ratings.items():
if len(ratings) > self.min_n_ratings:
i = rng.randint(0, len(ratings))
raw_testset.append(ratings[i])
raw_trainset += [
rating for (j, rating) in enumerate(ratings) if j != i
]
if not raw_trainset:
raise ValueError(
"Could not build any trainset. Maybe " "min_n_ratings is too high?"
)
trainset = data.construct_trainset(raw_trainset)
testset = data.construct_testset(raw_testset)
yield trainset, testset
def get_n_folds(self):
return self.n_splits
class PredefinedKFold:
"""A cross-validation iterator to when a dataset has been loaded with the
:meth:`load_from_folds <surprise.dataset.Dataset.load_from_folds>`
method.
See an example in the :ref:`User Guide <load_from_folds_example>`.
"""
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
self.n_splits = len(data.folds_files)
for train_file, test_file in data.folds_files:
raw_trainset = data.read_ratings(train_file)
raw_testset = data.read_ratings(test_file)
trainset = data.construct_trainset(raw_trainset)
testset = data.construct_testset(raw_testset)
yield trainset, testset
def get_n_folds(self):
return self.n_splits
|
knns.py | """
the :mod:`knns` module includes some k-NN inspired algorithms.
"""
import heapq
import numpy as np
from .algo_base import AlgoBase
from .predictions import PredictionImpossible
# Important note: as soon as an algorithm uses a similarity measure, it should
# also allow the bsl_options parameter because of the pearson_baseline
# similarity. It can be done explicitly (e.g. KNNBaseline), or implicetely
# using kwargs (e.g. KNNBasic).
class SymmetricAlgo(AlgoBase):
"""This is an abstract class aimed to ease the use of symmetric algorithms.
A symmetric algorithm is an algorithm that can can be based on users or on
items indifferently, e.g. all the algorithms in this module.
When the algo is user-based x denotes a user and y an item. Else, it's
reversed.
"""
def __init__(self, sim_options={}, verbose=True, **kwargs):
AlgoBase.__init__(self, sim_options=sim_options, **kwargs)
self.verbose = verbose
def fit(self, trainset):
AlgoBase.fit(self, trainset)
ub = self.sim_options["user_based"]
self.n_x = self.trainset.n_users if ub else self.trainset.n_items
self.n_y = self.trainset.n_items if ub else self.trainset.n_users
self.xr = self.trainset.ur if ub else self.trainset.ir
self.yr = self.trainset.ir if ub else self.trainset.ur
return self
def switch(self, u_stuff, i_stuff):
"""Return x_stuff and y_stuff depending on the user_based field."""
if self.sim_options["user_based"]:
return u_stuff, i_stuff
else:
return i_stuff, u_stuff
class KNNBasic(SymmetricAlgo):
"""A basic collaborative filtering algorithm.
The prediction :math:`\\hat{r}_{ui}` is set as:
.. math::
\\hat{r}_{ui} = \\frac{
\\sum\\limits_{v \\in N^k_i(u)} \\text{sim}(u, v) \\cdot r_{vi}}
{\\sum\\limits_{v \\in N^k_i(u)} \\text{sim}(u, v)}
or
.. math::
\\hat{r}_{ui} = \\frac{
\\sum\\limits_{j \\in N^k_u(i)} \\text{sim}(i, j) \\cdot r_{uj}}
{\\sum\\limits_{j \\in N^k_u(i)} \\text{sim}(i, j)}
depending on the ``user_based`` field of the ``sim_options`` parameter.
Args:
k(int): The (max) number of neighbors to take into account for
aggregation (see :ref:`this note <actual_k_note>`). Default is
``40``.
min_k(int): The minimum number of neighbors to take into account for
aggregation. If there are not enough neighbors, the prediction is
set to the global mean of all ratings. Default is ``1``.
sim_options(dict): A dictionary of options for the similarity
measure. See :ref:`similarity_measures_configuration` for accepted
options.
verbose(bool): Whether to print trace messages of bias estimation,
similarity, etc. Default is True.
"""
def __init__(self, k=40, min_k=1, sim_options={}, verbose=True, **kwargs):
SymmetricAlgo.__init__(self, sim_options=sim_options, verbose=verbose, **kwargs)
self.k = k
self.min_k = min_k
def fit(self, trainset):
SymmetricAlgo.fit(self, trainset)
self.sim = self.compute_similarities()
return self
def estimate(self, u, i):
if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):
raise PredictionImpossible("User and/or item is unknown.")
x, y = self.switch(u, i)
neighbors = [(self.sim[x, x2], r) for (x2, r) in self.yr[y]]
k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[0])
# compute weighted average
sum_sim = sum_ratings = actual_k = 0
for (sim, r) in k_neighbors:
if sim > 0:
sum_sim += sim
sum_ratings += sim * r
actual_k += 1
if actual_k < self.min_k:
raise PredictionImpossible("Not enough neighbors.")
est = sum_ratings / sum_sim
details = {"actual_k": actual_k}
return est, details
class KNNWithMeans(SymmetricAlgo):
"""A basic collaborative filtering algorithm, taking into account the mean
ratings of each user.
The prediction :math:`\\hat{r}_{ui}` is set as:
.. math::
\\hat{r}_{ui} = \\mu_u + \\frac{ \\sum\\limits_{v \\in N^k_i(u)}
\\text{sim}(u, v) \\cdot (r_{vi} - \\mu_v)} {\\sum\\limits_{v \\in
N^k_i(u)} \\text{sim}(u, v)}
or
.. math::
\\hat{r}_{ui} = \\mu_i + \\frac{ \\sum\\limits_{j \\in N^k_u(i)}
\\text{sim}(i, j) \\cdot (r_{uj} - \\mu_j)} {\\sum\\limits_{j \\in
N^k_u(i)} \\text{sim}(i, j)}
depending on the ``user_based`` field of the ``sim_options`` parameter.
Args:
k(int): The (max) number of neighbors to take into account for
aggregation (see :ref:`this note <actual_k_note>`). Default is
``40``.
min_k(int): The minimum number of neighbors to take into account for
aggregation. If there are not enough neighbors, the neighbor
aggregation is set to zero (so the prediction ends up being
equivalent to the mean :math:`\\mu_u` or :math:`\\mu_i`). Default is
``1``.
sim_options(dict): A dictionary of options for the similarity
measure. See :ref:`similarity_measures_configuration` for accepted
options.
verbose(bool): Whether to print trace messages of bias estimation,
similarity, etc. Default is True.
"""
def __init__(self, k=40, min_k=1, sim_options={}, verbose=True, **kwargs):
SymmetricAlgo.__init__(self, sim_options=sim_options, verbose=verbose, **kwargs)
self.k = k
self.min_k = min_k
def fit(self, trainset):
SymmetricAlgo.fit(self, trainset)
self.sim = self.compute_similarities()
self.means = np.zeros(self.n_x)
for x, ratings in self.xr.items():
self.means[x] = np.mean([r for (_, r) in ratings])
return self
def estimate(self, u, i):
if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):
raise PredictionImpossible("User and/or item is unknown.")
x, y = self.switch(u, i)
neighbors = [(x2, self.sim[x, x2], r) for (x2, r) in self.yr[y]]
k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[1])
est = self.means[x]
# compute weighted average
sum_sim = sum_ratings = actual_k = 0
for (nb, sim, r) in k_neighbors:
if sim > 0:
sum_sim += sim
sum_ratings += sim * (r - self.means[nb])
actual_k += 1
if actual_k < self.min_k:
sum_ratings = 0
try:
est += sum_ratings / sum_sim
except ZeroDivisionError:
pass # return mean
details = {"actual_k": actual_k}
return est, details
class KNNBaseline(SymmetricAlgo):
"""A basic collaborative filtering algorithm taking into account a
*baseline* rating.
The prediction :math:`\\hat{r}_{ui}` is set as:
.. math::
\\hat{r}_{ui} = b_{ui} + \\frac{ \\sum\\limits_{v \\in N^k_i(u)}
\\text{sim}(u, v) \\cdot (r_{vi} - b_{vi})} {\\sum\\limits_{v \\in
N^k_i(u)} \\text{sim}(u, v)}
or
.. math::
\\hat{r}_{ui} = b_{ui} + \\frac{ \\sum\\limits_{j \\in N^k_u(i)}
\\text{sim}(i, j) \\cdot (r_{uj} - b_{uj})} {\\sum\\limits_{j \\in
N^k_u(i)} \\text{sim}(i, j)}
depending on the ``user_based`` field of the ``sim_options`` parameter. For
the best predictions, use the :func:`pearson_baseline
<surprise.similarities.pearson_baseline>` similarity measure.
This algorithm corresponds to formula (3), section 2.2 of
:cite:`Koren:2010`.
Args:
k(int): The (max) number of neighbors to take into account for
aggregation (see :ref:`this note <actual_k_note>`). Default is
``40``.
min_k(int): The minimum number of neighbors to take into account for
aggregation. If there are not enough neighbors, the neighbor
aggregation is set to zero (so the prediction ends up being
equivalent to the baseline). Default is ``1``.
sim_options(dict): A dictionary of options for the similarity
measure. See :ref:`similarity_measures_configuration` for accepted
options. It is recommended to use the :func:`pearson_baseline
<surprise.similarities.pearson_baseline>` similarity measure.
bsl_options(dict): A dictionary of options for the baseline estimates
computation. See :ref:`baseline_estimates_configuration` for
accepted options.
verbose(bool): Whether to print trace messages of bias estimation,
similarity, etc. Default is True.
"""
def __init__(
self, k=40, min_k=1, sim_options={}, bsl_options={}, verbose=True, **kwargs
):
SymmetricAlgo.__init__(
self,
sim_options=sim_options,
bsl_options=bsl_options,
verbose=verbose,
**kwargs
)
self.k = k
self.min_k = min_k
def fit(self, trainset):
SymmetricAlgo.fit(self, trainset)
self.bu, self.bi = self.compute_baselines()
self.bx, self.by = self.switch(self.bu, self.bi)
self.sim = self.compute_similarities()
return self
def estimate(self, u, i):
est = self.trainset.global_mean
if self.trainset.knows_user(u):
est += self.bu[u]
if self.trainset.knows_item(i):
est += self.bi[i]
x, y = self.switch(u, i)
if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):
return est
neighbors = [(x2, self.sim[x, x2], r) for (x2, r) in self.yr[y]]
k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[1])
# compute weighted average
sum_sim = sum_ratings = actual_k = 0
for (nb, sim, r) in k_neighbors:
if sim > 0:
sum_sim += sim
nb_bsl = self.trainset.global_mean + self.bx[nb] + self.by[y]
sum_ratings += sim * (r - nb_bsl)
actual_k += 1
if actual_k < self.min_k:
sum_ratings = 0
try:
est += sum_ratings / sum_sim
except ZeroDivisionError:
pass # just baseline again
details = {"actual_k": actual_k}
return est, details
class KNNWithZScore(SymmetricAlgo):
"""A basic collaborative filtering algorithm, taking into account
the z-score normalization of each user.
The prediction :math:`\\hat{r}_{ui}` is set as:
.. math::
\\hat{r}_{ui} = \\mu_u + \\sigma_u \\frac{ \\sum\\limits_{v \\in N^k_i(u)}
\\text{sim}(u, v) \\cdot (r_{vi} - \\mu_v) / \\sigma_v} {\\sum\\limits_{v
\\in N^k_i(u)} \\text{sim}(u, v)}
or
.. math::
\\hat{r}_{ui} = \\mu_i + \\sigma_i \\frac{ \\sum\\limits_{j \\in N^k_u(i)}
\\text{sim}(i, j) \\cdot (r_{uj} - \\mu_j) / \\sigma_j} {\\sum\\limits_{j
\\in N^k_u(i)} \\text{sim}(i, j)}
depending on the ``user_based`` field of the ``sim_options`` parameter.
If :math:`\\sigma` is 0, than the overall sigma is used in that case.
Args:
k(int): The (max) number of neighbors to take into account for
aggregation (see :ref:`this note <actual_k_note>`). Default is
``40``.
min_k(int): The minimum number of neighbors to take into account for
aggregation. If there are not enough neighbors, the neighbor
aggregation is set to zero (so the prediction ends up being
equivalent to the mean :math:`\\mu_u` or :math:`\\mu_i`). Default is
``1``.
sim_options(dict): A dictionary of options for the similarity
measure. See :ref:`similarity_measures_configuration` for accepted
options.
verbose(bool): Whether to print trace messages of bias estimation,
similarity, etc. Default is True.
"""
def __init__(self, k=40, min_k=1, sim_options={}, verbose=True, **kwargs):
SymmetricAlgo.__init__(self, sim_options=sim_options, verbose=verbose, **kwargs)
self.k = k
self.min_k = min_k
def fit(self, trainset):
SymmetricAlgo.fit(self, trainset)
self.means = np.zeros(self.n_x)
self.sigmas = np.zeros(self.n_x)
# when certain sigma is 0, use overall sigma
self.overall_sigma = np.std([r for (_, _, r) in self.trainset.all_ratings()])
for x, ratings in self.xr.items():
self.means[x] = np.mean([r for (_, r) in ratings])
sigma = np.std([r for (_, r) in ratings])
self.sigmas[x] = self.overall_sigma if sigma == 0.0 else sigma
self.sim = self.compute_similarities()
return self
def estimate(self, u, i):
if not (self.trainset.knows_user(u) and self.trainset.knows_item(i)):
raise PredictionImpossible("User and/or item is unknown.")
x, y = self.switch(u, i)
neighbors = [(x2, self.sim[x, x2], r) for (x2, r) in self.yr[y]]
k_neighbors = heapq.nlargest(self.k, neighbors, key=lambda t: t[1])
est = self.means[x]
# compute weighted average
sum_sim = sum_ratings = actual_k = 0
for (nb, sim, r) in k_neighbors:
if sim > 0:
sum_sim += sim
sum_ratings += sim * (r - self.means[nb]) / self.sigmas[nb]
actual_k += 1
if actual_k < self.min_k:
sum_ratings = 0
try:
est += sum_ratings / sum_sim * self.sigmas[x]
except ZeroDivisionError:
pass # return mean
details = {"actual_k": actual_k}
return est, details
|
algo_base.py | """
The :mod:`surprise.prediction_algorithms.algo_base` module defines the base
class :class:`AlgoBase` from which every single prediction algorithm has to
inherit.
"""
import heapq
from .. import similarities as sims
from .optimize_baselines import baseline_als, baseline_sgd
from .predictions import Prediction, PredictionImpossible
class AlgoBase:
"""Abstract class where is defined the basic behavior of a prediction
algorithm.
Keyword Args:
baseline_options(dict, optional): If the algorithm needs to compute a
baseline estimate, the ``baseline_options`` parameter is used to
configure how they are computed. See
:ref:`baseline_estimates_configuration` for usage.
"""
def __init__(self, **kwargs):
self.bsl_options = kwargs.get("bsl_options", {})
self.sim_options = kwargs.get("sim_options", {})
if "user_based" not in self.sim_options:
self.sim_options["user_based"] = True
def fit(self, trainset):
"""Train an algorithm on a given training set.
This method is called by every derived class as the first basic step
for training an algorithm. It basically just initializes some internal
structures and set the self.trainset attribute.
Args:
trainset(:obj:`Trainset <surprise.Trainset>`) : A training
set, as returned by the :meth:`folds
<surprise.dataset.Dataset.folds>` method.
Returns:
self
"""
self.trainset = trainset
# (re) Initialise baselines
self.bu = self.bi = None
return self
def predict(self, uid, iid, r_ui=None, clip=True, verbose=False):
"""Compute the rating prediction for given user and item.
The ``predict`` method converts raw ids to inner ids and then calls the
``estimate`` method which is defined in every derived class. If the
prediction is impossible (e.g. because the user and/or the item is
unknown), the prediction is set according to
:meth:`default_prediction()
<surprise.prediction_algorithms.algo_base.AlgoBase.default_prediction>`.
Args:
uid: (Raw) id of the user. See :ref:`this note<raw_inner_note>`.
iid: (Raw) id of the item. See :ref:`this note<raw_inner_note>`.
r_ui(float): The true rating :math:`r_{ui}`. Optional, default is
``None``.
clip(bool): Whether to clip the estimation into the rating scale.
For example, if :math:`\\hat{r}_{ui}` is :math:`5.5` while the
rating scale is :math:`[1, 5]`, then :math:`\\hat{r}_{ui}` is
set to :math:`5`. Same goes if :math:`\\hat{r}_{ui} < 1`.
Default is ``True``.
verbose(bool): Whether to print details of the prediction. Default
is False.
Returns:
A :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>` object
containing:
- The (raw) user id ``uid``.
- The (raw) item id ``iid``.
- The true rating ``r_ui`` (:math:`r_{ui}`).
- The estimated rating (:math:`\\hat{r}_{ui}`).
- Some additional details about the prediction that might be useful
for later analysis.
"""
# Convert raw ids to inner ids
try:
iuid = self.trainset.to_inner_uid(uid)
except ValueError:
iuid = "UKN__" + str(uid)
try:
iiid = self.trainset.to_inner_iid(iid)
except ValueError:
iiid = "UKN__" + str(iid)
details = {}
try:
est = self.estimate(iuid, iiid)
# If the details dict was also returned
if isinstance(est, tuple):
est, details = est
details["was_impossible"] = False
except PredictionImpossible as e:
est = self.default_prediction()
details["was_impossible"] = True
details["reason"] = str(e)
# clip estimate into [lower_bound, higher_bound]
if clip:
lower_bound, higher_bound = self.trainset.rating_scale
est = min(higher_bound, est)
est = max(lower_bound, est)
pred = Prediction(uid, iid, r_ui, est, details)
if verbose:
print(pred)
return pred
def default_prediction(self):
"""Used when the ``PredictionImpossible`` exception is raised during a
call to :meth:`predict()
<surprise.prediction_algorithms.algo_base.AlgoBase.predict>`. By
default, return the global mean of all ratings (can be overridden in
child classes).
Returns:
(float): The mean of all ratings in the trainset.
"""
return self.trainset.global_mean
def test(self, testset, verbose=False):
"""Test the algorithm on given testset, i.e. estimate all the ratings
in the given testset.
Args:
testset: A test set, as returned by a :ref:`cross-validation
itertor<use_cross_validation_iterators>` or by the
:meth:`build_testset() <surprise.Trainset.build_testset>`
method.
verbose(bool): Whether to print details for each predictions.
Default is False.
Returns:
A list of :class:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>` objects
that contains all the estimated ratings.
"""
# The ratings are translated back to their original scale.
predictions = [
self.predict(uid, iid, r_ui_trans, verbose=verbose)
for (uid, iid, r_ui_trans) in testset
]
return predictions
def compute_baselines(self):
"""Compute users and items baselines.
The way baselines are computed depends on the ``bsl_options`` parameter
passed at the creation of the algorithm (see
:ref:`baseline_estimates_configuration`).
This method is only relevant for algorithms using :func:`Pearson
baseline similarity<surprise.similarities.pearson_baseline>` or the
:class:`BaselineOnly
<surprise.prediction_algorithms.baseline_only.BaselineOnly>` algorithm.
Returns:
A tuple ``(bu, bi)``, which are users and items baselines."""
# Firt of, if this method has already been called before on the same
# trainset, then just return. Indeed, compute_baselines may be called
# more than one time, for example when a similarity metric (e.g.
# pearson_baseline) uses baseline estimates.
if self.bu is not None:
return self.bu, self.bi
method = dict(als=baseline_als, sgd=baseline_sgd)
method_name = self.bsl_options.get("method", "als")
try:
if getattr(self, "verbose", False):
print("Estimating biases using", method_name + "...")
self.bu, self.bi = method[method_name](self)
return self.bu, self.bi
except KeyError:
raise ValueError(
"Invalid method "
+ method_name
+ " for baseline computation."
+ " Available methods are als and sgd."
)
def compute_similarities(self):
"""Build the similarity matrix.
The way the similarity matrix is computed depends on the
``sim_options`` parameter passed at the creation of the algorithm (see
:ref:`similarity_measures_configuration`).
This method is only relevant for algorithms using a similarity measure,
such as the :ref:`k-NN algorithms <pred_package_knn_inpired>`.
Returns:
The similarity matrix."""
construction_func = {
"cosine": sims.cosine,
"msd": sims.msd,
"pearson": sims.pearson,
"pearson_baseline": sims.pearson_baseline,
}
if self.sim_options["user_based"]:
n_x, yr = self.trainset.n_users, self.trainset.ir
else:
n_x, yr = self.trainset.n_items, self.trainset.ur
min_support = self.sim_options.get("min_support", 1)
args = [n_x, yr, min_support]
name = self.sim_options.get("name", "msd").lower()
if name == "pearson_baseline":
shrinkage = self.sim_options.get("shrinkage", 100)
bu, bi = self.compute_baselines()
if self.sim_options["user_based"]:
bx, by = bu, bi
else:
bx, by = bi, bu
args += [self.trainset.global_mean, bx, by, shrinkage]
try:
if getattr(self, "verbose", False):
print(f"Computing the {name} similarity matrix...")
sim = construction_func[name](*args)
if getattr(self, "verbose", False):
print("Done computing similarity matrix.")
return sim
except KeyError:
raise NameError(
"Wrong sim name "
+ name
+ ". Allowed values "
+ "are "
+ ", ".join(construction_func.keys())
+ "."
)
def get_neighbors(self, iid, k):
"""Return the ``k`` nearest neighbors of ``iid``, which is the inner id
of a user or an item, depending on the ``user_based`` field of
``sim_options`` (see :ref:`similarity_measures_configuration`).
As the similarities are computed on the basis of a similarity measure,
this method is only relevant for algorithms using a similarity measure,
such as the :ref:`k-NN algorithms <pred_package_knn_inpired>`.
For a usage example, see the :ref:`FAQ <get_k_nearest_neighbors>`.
Args:
iid(int): The (inner) id of the user (or item) for which we want
the nearest neighbors. See :ref:`this note<raw_inner_note>`.
k(int): The number of neighbors to retrieve.
Returns:
The list of the ``k`` (inner) ids of the closest users (or items)
to ``iid``.
"""
if self.sim_options["user_based"]:
all_instances = self.trainset.all_users
else:
all_instances = self.trainset.all_items
others = [(x, self.sim[iid, x]) for x in all_instances() if x != iid]
others = heapq.nlargest(k, others, key=lambda tple: tple[1])
k_nearest_neighbors = [j for (j, _) in others]
return k_nearest_neighbors
|
random_pred.py | """ Algorithm predicting a random rating.
"""
import numpy as np
from .algo_base import AlgoBase
class NormalPredictor(AlgoBase):
"""Algorithm predicting a random rating based on the distribution of the
training set, which is assumed to be normal.
The prediction :math:`\\hat{r}_{ui}` is generated from a normal distribution
:math:`\\mathcal{N}(\\hat{\\mu}, \\hat{\\sigma}^2)` where :math:`\\hat{\\mu}` and
:math:`\\hat{\\sigma}` are estimated from the training data using Maximum
Likelihood Estimation:
.. math::
\\hat{\\mu} &= \\frac{1}{|R_{train}|} \\sum_{r_{ui} \\in R_{train}}
r_{ui}\\\\\\\\\
\\hat{\\sigma} &= \\sqrt{\\sum_{r_{ui} \\in R_{train}}
\\frac{(r_{ui} - \\hat{\\mu})^2}{|R_{train}|}}
"""
def __init__(self):
AlgoBase.__init__(self)
def fit(self, trainset):
AlgoBase.fit(self, trainset)
num = sum(
(r - self.trainset.global_mean) ** 2
for (_, _, r) in self.trainset.all_ratings()
)
denum = self.trainset.n_ratings
self.sigma = np.sqrt(num / denum)
return self
def estimate(self, *_):
return np.random.normal(self.trainset.global_mean, self.sigma)
|
predictions.py | """
The :mod:`surprise.prediction_algorithms.predictions` module defines the
:class:`Prediction` named tuple and the :class:`PredictionImpossible`
exception.
"""
from collections import namedtuple
class PredictionImpossible(Exception):
r"""Exception raised when a prediction is impossible.
When raised, the estimation :math:`\hat{r}_{ui}` is set to the global mean
of all ratings :math:`\mu`.
"""
pass
class Prediction(namedtuple("Prediction", ["uid", "iid", "r_ui", "est", "details"])):
"""A named tuple for storing the results of a prediction.
It's wrapped in a class, but only for documentation and printing purposes.
Args:
uid: The (raw) user id. See :ref:`this note<raw_inner_note>`.
iid: The (raw) item id. See :ref:`this note<raw_inner_note>`.
r_ui(float): The true rating :math:`r_{ui}`.
est(float): The estimated rating :math:`\\hat{r}_{ui}`.
details (dict): Stores additional details about the prediction that
might be useful for later analysis.
"""
__slots__ = () # for memory saving purpose.
def __str__(self):
s = f"user: {self.uid:<10} "
s += f"item: {self.iid:<10} "
if self.r_ui is not None:
s += f"r_ui = {self.r_ui:1.2f} "
else:
s += "r_ui = None "
s += f"est = {self.est:1.2f} "
s += str(self.details)
return s
|
__init__.py | """
The :mod:`prediction_algorithms` package includes the prediction algorithms
available for recommendation.
The available prediction algorithms are:
.. autosummary::
:nosignatures:
random_pred.NormalPredictor
baseline_only.BaselineOnly
knns.KNNBasic
knns.KNNWithMeans
knns.KNNWithZScore
knns.KNNBaseline
matrix_factorization.SVD
matrix_factorization.SVDpp
matrix_factorization.NMF
slope_one.SlopeOne
co_clustering.CoClustering
"""
from .algo_base import AlgoBase
from .baseline_only import BaselineOnly
from .co_clustering import CoClustering
from .knns import KNNBaseline, KNNBasic, KNNWithMeans, KNNWithZScore
from .matrix_factorization import NMF, SVD, SVDpp
from .predictions import Prediction, PredictionImpossible
from .random_pred import NormalPredictor
from .slope_one import SlopeOne
__all__ = [
"AlgoBase",
"NormalPredictor",
"BaselineOnly",
"KNNBasic",
"KNNBaseline",
"KNNWithMeans",
"SVD",
"SVDpp",
"NMF",
"SlopeOne",
"CoClustering",
"PredictionImpossible",
"Prediction",
"KNNWithZScore",
]
|
baseline_only.py | """
This class implements the baseline estimation.
"""
from .algo_base import AlgoBase
class BaselineOnly(AlgoBase):
r"""Algorithm predicting the baseline estimate for given user and item.
:math:`\hat{r}_{ui} = b_{ui} = \mu + b_u + b_i`
If user :math:`u` is unknown, then the bias :math:`b_u` is assumed to be
zero. The same applies for item :math:`i` with :math:`b_i`.
See section 2.1 of :cite:`Koren:2010` for details.
Args:
bsl_options(dict): A dictionary of options for the baseline estimates
computation. See :ref:`baseline_estimates_configuration` for
accepted options.
verbose(bool): Whether to print trace messages of bias estimation,
similarity, etc. Default is True.
"""
def __init__(self, bsl_options={}, verbose=True):
AlgoBase.__init__(self, bsl_options=bsl_options)
self.verbose = verbose
def fit(self, trainset):
AlgoBase.fit(self, trainset)
self.bu, self.bi = self.compute_baselines()
return self
def estimate(self, u, i):
est = self.trainset.global_mean
if self.trainset.knows_user(u):
est += self.bu[u]
if self.trainset.knows_item(i):
est += self.bi[i]
return est
|
linux_dependencies.py | import os
import traceback
import sys
print("before function process")
def process(version):
print("inside fun process")
currentDirectory = os.path.dirname(os.path.abspath(__file__))
print(currentDirectory)
try:
from os.path import expanduser
import platform
import subprocess
import sys
import demoji
try:
print('Downloading NLTK additional packages...')
import nltk
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
except Exception as e:
print('NLTK Error: '+str(e))
pass
from appbe.dataPath import DATA_DIR
import shutil
import importlib
license_path = DATA_DIR
if os.path.isdir(license_path) == False:
os.makedirs(license_path)
import warnings
warnings.filterwarnings("ignore")
LicenseFolder = os.path.join(license_path,'License')
if os.path.isdir(LicenseFolder) == False:
os.makedirs(LicenseFolder)
sqlite_path = os.path.join(license_path,'sqlite')
if os.path.isdir(sqlite_path) == False:
os.makedirs(sqlite_path)
pretrainedModel_path = os.path.join(license_path,'PreTrainedModels')
if os.path.isdir(pretrainedModel_path) == False:
os.makedirs(pretrainedModel_path)
config_path = os.path.join(license_path,'config')
if os.path.isdir(config_path) == False:
os.makedirs(config_path)
target_path = os.path.join(license_path,'target')
if os.path.isdir(target_path) == False:
os.makedirs(target_path)
data_path = os.path.join(license_path,'storage')
if os.path.isdir(data_path) == False:
os.makedirs(data_path)
log_path = os.path.join(license_path,'logs')
if os.path.isdir(log_path) == False:
os.makedirs(log_path)
configFolder = os.path.join(currentDirectory,'..','config')
for file in os.listdir(configFolder):
if file.endswith(".var"):
os.remove(os.path.join(configFolder,file))
versionfile = os.path.join(configFolder,str(version)+'.var')
with open(versionfile, 'w') as fp:
pass
manage_path = os.path.join(currentDirectory,'..','aion.py')
print('Setting up Django Environment for AION User Interface')
proc = subprocess.Popen([sys.executable, manage_path, "-m","migrateappfe"],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
err_string = stderr.decode('utf8')
import re
result = re.search("No module named '(.*)'", err_string)
if 'ModuleNotFoundError' in err_string:
print('\n"{}" module is missing. The dependencies of AION were not installed properly. Uninstall and reinstall AION'.format(result.group(1)))
else:
print('\nThe dependencies of AION were not installed properly. Uninstall and reinstall AION')
raise Exception(err_string)
else:
print('AION User Interface successfully set')
print('--------------AION Installed Successfully--------------')
except Exception as e:
print(e)
f = open(os.path.join(currentDirectory, 'workspace_error_logs.txt'), "w")
f.write(str(traceback.format_exc()))
f.close()
pass
if __name__ == "__main__":
process(sys.argv[1]) |
dependencies.py | import os
import traceback
def process(version):
currentDirectory = os.path.dirname(os.path.abspath(__file__))
try:
import win32com.client
from os.path import expanduser
import platform
import subprocess
import sys
import demoji
try:
print('Downloading NLTK additional packages...')
import nltk
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
except Exception as e:
print('NLTK Error: '+str(e))
pass
from appbe.dataPath import DATA_DIR
from win32com.shell import shell, shellcon
import shutil
import importlib
license_path = DATA_DIR
if os.path.isdir(license_path) == False:
os.makedirs(license_path)
import warnings
warnings.filterwarnings("ignore")
LicenseFolder = os.path.join(license_path,'License')
if os.path.isdir(LicenseFolder) == False:
os.makedirs(LicenseFolder)
sqlite_path = os.path.join(license_path,'sqlite')
if os.path.isdir(sqlite_path) == False:
os.makedirs(sqlite_path)
pretrainedModel_path = os.path.join(license_path,'PreTrainedModels')
if os.path.isdir(pretrainedModel_path) == False:
os.makedirs(pretrainedModel_path)
config_path = os.path.join(license_path,'config')
if os.path.isdir(config_path) == False:
os.makedirs(config_path)
target_path = os.path.join(license_path,'target')
if os.path.isdir(target_path) == False:
os.makedirs(target_path)
data_path = os.path.join(license_path,'storage')
if os.path.isdir(data_path) == False:
os.makedirs(data_path)
log_path = os.path.join(license_path,'logs')
if os.path.isdir(log_path) == False:
os.makedirs(log_path)
configFolder = os.path.join(currentDirectory,'..','config')
for file in os.listdir(configFolder):
if file.endswith(".var"):
os.remove(os.path.join(configFolder,file))
versionfile = os.path.join(configFolder,str(version)+'.var')
with open(versionfile, 'w') as fp:
pass
manage_path = os.path.join(currentDirectory,'..','aion.py')
print('Setting up Django Environment for AION User Interface')
proc = subprocess.Popen([sys.executable, manage_path, "-m","migrateappfe"],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
err_string = stderr.decode('utf8')
import re
result = re.search("No module named '(.*)'", err_string)
if 'ModuleNotFoundError' in err_string:
print('\n"{}" module is missing. The dependencies of AION were not installed properly. Uninstall and reinstall AION'.format(result.group(1)))
else:
print('\nThe dependencies of AION were not installed properly. Uninstall and reinstall AION')
raise Exception(err_string)
else:
print('AION User Interface successfully set')
desktop = shell.SHGetFolderPath (0, shellcon.CSIDL_DESKTOP, 0, 0)
#desktop = os.path.expanduser('~/Desktop')
path = os.path.join(desktop, 'Explorer {0}.lnk'.format(version))
target = os.path.normpath(os.path.join(currentDirectory,'..', 'sbin', 'AION_Explorer.bat'))
icon = os.path.join(currentDirectory,'icons','aion.ico')
shell = win32com.client.Dispatch("WScript.Shell")
shortcut = shell.CreateShortCut(path)
shortcut.Targetpath = '"'+target+'"'
shortcut.WorkingDirectory = currentDirectory
#shortcut.WorkingDirectory = os.path.dirname(__file__)
shortcut.IconLocation = icon
shortcut.WindowStyle = 1 # 7 - Minimized, 3 - Maximized, 1 - Normal
shortcut.save()
path = os.path.join(desktop, 'Shell {0}.lnk'.format(version))
target = os.path.normpath(os.path.join(currentDirectory,'..','sbin', 'AION_Shell.bat'))
icon = os.path.join(currentDirectory,'icons','aion_shell.ico')
shell = win32com.client.Dispatch("WScript.Shell")
shortcut = shell.CreateShortCut(path)
shortcut.Targetpath = '"'+target+'"'
shortcut.WorkingDirectory = currentDirectory
#shortcut.WorkingDirectory = os.path.dirname(__file__)
shortcut.IconLocation = icon
shortcut.WindowStyle = 1 # 7 - Minimized, 3 - Maximized, 1 - Normal
shortcut.save()
print('--------------AION Installed Successfully--------------')
except Exception as e:
print(e)
f = open(os.path.join(currentDirectory, 'workspace_error_logs.txt'), "w")
f.write(str(traceback.format_exc()))
f.close()
pass
|
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
visualization.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import warnings
import numpy as np
import pandas as pd
import sklearn.metrics as metrics
from collections import defaultdict
from sklearn.metrics import confusion_matrix
import re
import shutil
import scipy.stats as st
import json
import os,sys
import glob
import logging
from utils.file_ops import read_df_compressed
class Visualization():
def __init__(self,usecasename,version,dataframe,visualizationJson,dateTimeColumn,deployPath,dataFolderLocation,numericContinuousFeatures,discreteFeatures,categoricalFeatures,modelFeatures,targetFeature,modeltype,original_data_file,profiled_data_file,trained_data_file,predicted_data_file,labelMaps,vectorizerFeatures,textFeatures,numericalFeatures,nonNumericFeatures,emptyFeatures,nrows,ncols,saved_model,scoreParam,learner_type,modelname,featureReduction,reduction_data_file):
self.dataframe = dataframe
self.displayjson = {}
self.visualizationJson = visualizationJson
self.dateTimeColumn = dateTimeColumn
self.deployPath = deployPath
#shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'aion_portal.py'),self.deployPath)
if learner_type == 'ML' and modelname != 'Neural Architecture Search':
if(os.path.isfile(os.path.join(self.deployPath,'explainable_ai.py'))):
os.remove(os.path.join(self.deployPath,'explainable_ai.py'))
shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','xai','explainable_ai.py'),self.deployPath)
# os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
try:
os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
except FileExistsError:
os.remove(os.path.join(self.deployPath,'aion_xai.py'))
os.rename(os.path.join(self.deployPath,'explainable_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
elif learner_type == 'DL' or modelname == 'Neural Architecture Search':
if(os.path.isfile(os.path.join(self.deployPath,'explainable_ai.py'))):
os.remove(os.path.join(self.deployPath,'explainable_ai.py'))
shutil.copy2(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','utilities','xai','explainabledl_ai.py'),self.deployPath)
# os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
try:
os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
except FileExistsError:
os.remove(os.path.join(self.deployPath,'aion_xai.py'))
os.rename(os.path.join(self.deployPath,'explainabledl_ai.py'),os.path.join(self.deployPath,'aion_xai.py'))
self.jsondeployPath = deployPath
#self.deployPath = self.deployPath+'visualization/'
self.dataFolderLocation = dataFolderLocation
self.vectorizerFeatures = vectorizerFeatures
self.textFeatures = textFeatures
self.emptyFeatures = emptyFeatures
'''
try:
os.makedirs(self.deployPath)
except OSError as e:
print("\nFolder Already Exists")
'''
self.numericContinuousFeatures = numericContinuousFeatures
self.discreteFeatures = discreteFeatures
self.categoricalFeatures = categoricalFeatures
self.modelFeatures = modelFeatures
self.modeltype = modeltype
self.targetFeature = targetFeature
self.displayjson['usecasename'] = str(usecasename)
self.displayjson['version'] = str(version)
self.displayjson['problemType'] = str(self.modeltype)
self.displayjson['targetFeature'] = self.targetFeature
self.displayjson['numericalFeatures'] = numericalFeatures
self.displayjson['nonNumericFeatures'] = nonNumericFeatures
self.displayjson['modelFeatures'] = self.modelFeatures
self.displayjson['textFeatures'] = self.textFeatures
self.displayjson['emptyFeatures'] = self.emptyFeatures
self.displayjson['modelname']= str(modelname)
self.displayjson['preprocessedData'] = str(original_data_file)
self.displayjson['nrows'] = str(nrows)
self.displayjson['ncols'] = str(ncols)
self.displayjson['saved_model'] = str(saved_model)
self.displayjson['scoreParam'] = str(scoreParam)
self.displayjson['labelMaps'] = eval(str(labelMaps))
self.original_data_file = original_data_file
self.displayjson['featureReduction'] = featureReduction
if featureReduction == 'True':
self.displayjson['reduction_data_file'] = reduction_data_file
else:
self.displayjson['reduction_data_file'] = ''
self.pred_filename = predicted_data_file
self.profiled_data_file = profiled_data_file
self.displayjson['predictedData'] = predicted_data_file
self.displayjson['postprocessedData'] = profiled_data_file
#self.trained_data_file = trained_data_file
#self.displayjson['trainingData'] = trained_data_file
#self.displayjson['categorialFeatures']=categoricalFeatures
#self.displayjson['discreteFeatures']=discreteFeatures
#self.displayjson['continuousFeatures']=numericContinuousFeatures
#y = json.dumps(self.displayjson)
#print(y)
self.labelMaps = labelMaps
self.log = logging.getLogger('eion')
def visualizationrecommandsystem(self):
try:
import tensorflow.keras.utils as kutils
datasetid = self.visualizationJson['datasetid']
self.log.info('\n================== Data Profiling Details==================')
datacolumns=list(self.dataframe.columns)
self.log.info('================== Data Profiling Details End ==================\n')
self.log.info('================== Features Correlation Details ==================\n')
self.log.info('\n================== Model Performance Analysis ==================')
if os.path.exists(self.pred_filename):
try:
status,df=read_df_compressed(self.pred_filename)
if self.modeltype == 'Classification' or self.modeltype == 'ImageClassification' or self.modeltype == 'anomaly_detection':
y_actual = df['actual'].values
y_predict = df['predict'].values
y_actual = kutils.to_categorical(y_actual)
y_predict = kutils.to_categorical(y_predict)
classes = df.actual.unique()
n_classes = y_actual.shape[1]
self.log.info('-------> ROC AUC CURVE')
roc_curve_dict = []
for i in classes:
try:
classname = i
if str(self.labelMaps) != '{}':
inv_map = {v: k for k, v in self.labelMaps.items()}
classname = inv_map[i]
fpr, tpr, threshold = metrics.roc_curve(y_actual[:,i],y_predict[:,i])
roc_auc = metrics.auc(fpr, tpr)
class_roc_auc_curve = {}
class_roc_auc_curve['class'] = str(classname)
fprstring = ','.join(str(v) for v in fpr)
tprstring = ','.join(str(v) for v in tpr)
class_roc_auc_curve['FP'] = str(fprstring)
class_roc_auc_curve['TP'] = str(tprstring)
roc_curve_dict.append(class_roc_auc_curve)
self.log.info('----------> Class: '+str(classname))
self.log.info('------------> ROC_AUC: '+str(roc_auc))
self.log.info('------------> False Positive Rate (x Points): '+str(fpr))
self.log.info('------------> True Positive Rate (y Points): '+str(tpr))
except:
pass
self.displayjson['ROC_AUC_CURVE'] = roc_curve_dict
self.log.info('-------> Precision Recall CURVE')
precision_recall_curve_dict = []
for i in range(n_classes):
try:
lr_precision, lr_recall, threshold = metrics.precision_recall_curve(y_actual[:,i],y_predict[:,i])
classname = i
if str(self.labelMaps) != '{}':
inv_map = {v: k for k, v in self.labelMaps.items()}
classname = inv_map[i]
roc_auc = metrics.auc(lr_recall,lr_precision)
class_precision_recall_curve = {}
class_precision_recall_curve['class'] = str(classname)
Precisionstring = ','.join(str(round(v,2)) for v in lr_precision)
Recallstring = ','.join(str(round(v,2)) for v in lr_recall)
class_precision_recall_curve['Precision'] = str(Precisionstring)
class_precision_recall_curve['Recall'] = str(Recallstring)
precision_recall_curve_dict.append(class_precision_recall_curve)
except:
pass
self.log.info('----------> Class: '+str(classname))
self.log.info('------------> ROC_AUC: '+str(roc_auc))
self.log.info('------------> Recall (x Points): '+str(lr_precision))
self.log.info('------------> Precision (y Points): '+str(lr_recall))
self.displayjson['PRECISION_RECALL_CURVE'] = precision_recall_curve_dict
status,predictdataFrame=read_df_compressed(self.displayjson['predictedData'])
except Exception as e:
self.log.info('================== Error in Calculation ROC_AUC/Recall Precision Curve '+str(e))
self.log.info('================== Model Performance Analysis End ==================\n')
self.log.info('\n================== For Descriptive Analysis of Model Features ==================')
outputfile = os.path.join(self.jsondeployPath,'etc','display.json')
with open(outputfile, 'w') as fp:
json.dump(self.displayjson, fp)
self.log.info('================== For Descriptive Analysis of Model Features End ==================\n')
except Exception as inst:
self.log.info('Visualization Failed !....'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def drawlinechart(self,xcolumn,ycolumn,deploy_path,datasetid):
title = 'aion_visualization_'+xcolumn+"_"+ycolumn+"_linechart"
yaxisname = 'Average '+ycolumn
datasetindex = datasetid
visulizationjson = '[{"_id": "543234","_type": "visualization","_source": {"title": "'+title+'",'
visulizationjson = visulizationjson+'"visState": "{\\"title\\":\\"'+title+'\\",'
visulizationjson = visulizationjson+'\\"type\\":\\"line\\",\\"params\\":{\\"type\\":\\"line\\",\\"grid\\":{\\"categoryLines\\":false,\\"style\\":{\\"color\\":\\"#eee\\"}},\\"categoryAxes\\":[{\\"id\\":\\"CategoryAxis-1\\",\\"type\\":\\"category\\",\\"position\\":\\"bottom\\",\\"show\\":true,\\"style\\":{},\\"scale\\":{\\"type\\":\\"linear\\"},\\"labels\\":{\\"show\\":true,\\"truncate\\":100},\\"title\\":{}}],\\"valueAxes\\":[{\\"id\\":\\"ValueAxis-1\\",\\"name\\":\\"LeftAxis-1\\",\\"type\\":\\"value\\",\\"position\\":\\"left\\",\\"show\\":true,\\"style\\":{},\\"scale\\":{\\"type\\":\\"linear\\",\\"mode\\":\\"normal\\"},\\"labels\\":{\\"show\\":true,\\"rotate\\":0,\\"filter\\":false,\\"truncate\\":100},\\"title\\":'
visulizationjson = visulizationjson+'{\\"text\\":\\"'+yaxisname+'\\"}}],\\"seriesParams\\":[{\\"show\\":\\"true\\",\\"type\\":\\"line\\",\\"mode\\":\\"normal\\",\\"data\\":'
visulizationjson = visulizationjson+'{\\"label\\":\\"'+yaxisname+'\\",\\"id\\":\\"1\\"},\\"valueAxis\\":\\"ValueAxis-1\\",\\"drawLinesBetweenPoints\\":true,\\"showCircles\\":true}],\\"addTooltip\\":true,\\"addLegend\\":true,\\"legendPosition\\":\\"right\\",\\"times\\":[],\\"addTimeMarker\\":false},\\"aggs\\":[{\\"id\\":\\"1\\",\\"enabled\\":true,\\"type\\":\\"avg\\",\\"schema\\":\\"metric\\",\\"params\\":{\\"field\\":\\"'+str(ycolumn)+'\\"}},{\\"id\\":\\"2\\",\\"enabled\\":true,\\"type\\":\\"terms\\",\\"schema\\":\\"segment\\",\\"params\\":{\\"field\\":\\"'+xcolumn+'\\",\\"size\\":100,\\"order\\":\\"desc\\",\\"orderBy\\":\\"1\\",\\"otherBucket\\":false,\\"otherBucketLabel\\":\\"Other\\",\\"missingBucket\\":false,\\"missingBucketLabel\\":\\"Missing\\"}}]}","uiStateJSON": "{}", "description": "","version": 1,"kibanaSavedObjectMeta": {"searchSourceJSON": "{\\"index\\":\\"'+datasetindex+'\\",\\"query\\":{\\"query\\":\\"\\",\\"language\\":\\"lucene\\"},\\"filter\\":[]}"}},"_migrationVersion": {"visualization": "6.7.2"}}]'
filename = deploy_path+title+'.json'
f = open(filename, "w")
f.write(str(visulizationjson))
f.close()
def drawbarchart(self,xcolumn,ycolumn,deploy_path,datasetid):
title = 'aion_visualization_'+xcolumn+"_"+ycolumn+"_barchart"
yaxisname = 'Average '+ycolumn
datasetindex = datasetid
visulizationjson = '[{"_id": "123456","_type": "visualization","_source": {"title":"'+title+'",'
visulizationjson = visulizationjson+'"visState": "{\\"title\\":\\"'+title+'\\",'
visulizationjson = visulizationjson+'\\"type\\":\\"histogram\\",\\"params\\":{\\"addLegend\\":true,\\"addTimeMarker\\":false,\\"addTooltip\\":true,\\"categoryAxes\\":[{\\"id\\":\\"CategoryAxis-1\\",\\"labels\\":{\\"show\\":true,\\"truncate\\":100},\\"position\\":\\"bottom\\",\\"scale\\":{\\"type\\":\\"linear\\"},\\"show\\":true,\\"style\\":{},\\"title\\":{},\\"type\\":\\"category\\"}],\\"grid\\":{\\"categoryLines\\":false,\\"style\\":{\\"color\\":\\"#eee\\"}},\\"legendPosition\\":\\"right\\",\\"seriesParams\\":[{\\"data\\":{\\"id\\":\\"1\\",'
visulizationjson = visulizationjson+'\\"label\\":\\"'+yaxisname+'\\"},'
visulizationjson = visulizationjson+'\\"drawLinesBetweenPoints\\":true,\\"mode\\":\\"stacked\\",\\"show\\":\\"true\\",\\"showCircles\\":true,\\"type\\":\\"histogram\\",\\"valueAxis\\":\\"ValueAxis-1\\"}],\\"times\\":[],\\"type\\":\\"histogram\\",\\"valueAxes\\":[{\\"id\\":\\"ValueAxis-1\\",\\"labels\\":{\\"filter\\":false,\\"rotate\\":0,\\"show\\":true,\\"truncate\\":100},\\"name\\":\\"LeftAxis-1\\",\\"position\\":\\"left\\",\\"scale\\":{\\"mode\\":\\"normal\\",\\"type\\":\\"linear\\"},\\"show\\":true,\\"style\\":{},\\"title\\":'
visulizationjson = visulizationjson+'{\\"text\\":\\"'+yaxisname+'\\"},'
visulizationjson = visulizationjson+'\\"type\\":\\"value\\"}]},\\"aggs\\":[{\\"id\\":\\"1\\",\\"enabled\\":true,\\"type\\":\\"avg\\",\\"schema\\":\\"metric\\",\\"params\\":{\\"field\\":\\"'+str(xcolumn)+'\\"}},{\\"id\\":\\"2\\",\\"enabled\\":true,\\"type\\":\\"terms\\",\\"schema\\":\\"segment\\",\\"params\\":{\\"field\\":\\"'+ycolumn+'\\",\\"size\\":100,\\"order\\":\\"asc\\",\\"orderBy\\":\\"1\\",\\"otherBucket\\":false,\\"otherBucketLabel\\":\\"Other\\",\\"missingBucket\\":false,\\"missingBucketLabel\\":\\"Missing\\"}}]}","uiStateJSON":"{}","description": "","version": 1,"kibanaSavedObjectMeta": {'
visulizationjson = visulizationjson+'"searchSourceJSON": "{\\"index\\":\\"'+datasetindex+'\\",\\"query\\":{\\"language\\":\\"lucene\\",\\"query\\":\\"\\"},\\"filter\\":[]}"}},"_migrationVersion":{"visualization": "6.7.2"}}]'
filename = deploy_path+title+'.json'
f = open(filename, "w")
f.write(str(visulizationjson))
f.close()
def drawpiechart(self,xcolumn,deploy_path,datasetid):
title = 'aion_visualization_'+xcolumn+"_piechart"
datasetindex = datasetid
visulizationjson = '[{"_id": "123456","_type": "visualization","_source": {"title":"'+title+'",'
visulizationjson = visulizationjson+'"visState": "{\\"title\\":\\"'+title+'\\",'
visulizationjson = visulizationjson+'\\"type\\":\\"pie\\",\\"params\\":{\\"type\\":\\"pie\\",\\"addTooltip\\":true,\\"addLegend\\":true,\\"legendPosition\\":\\"right\\",\\"isDonut\\":true,\\"labels\\":{\\"show\\":false,\\"values\\":true,\\"last_level\\":true,\\"truncate\\":100}},\\"aggs\\":[{\\"id\\":\\"1\\",\\"enabled\\":true,\\"type\\":\\"count\\",\\"schema\\":\\"metric\\",\\"params\\":{}},{\\"id\\":\\"2\\",\\"enabled\\":true,\\"type\\":\\"terms\\",\\"schema\\":\\"segment\\",\\"params\\":{\\"field\\":\\"'+xcolumn+'\\",\\"size\\":100,\\"order\\":\\"asc\\",\\"orderBy\\":\\"1\\",\\"otherBucket\\":false,\\"otherBucketLabel\\":\\"Other\\",\\"missingBucket\\":false,\\"missingBucketLabel\\":\\"Missing\\"}}]}",'
visulizationjson = visulizationjson+'"uiStateJSON": "{}","description": "","version": 1,"kibanaSavedObjectMeta": {"searchSourceJSON":"{\\"index\\":\\"'+datasetid+'\\",\\"query\\":{\\"query\\":\\"\\",\\"language\\":\\"lucene\\"},\\"filter\\":[]}"}},"_migrationVersion": {"visualization": "6.7.2"}}]'
filename = deploy_path+title+'.json'
f = open(filename, "w")
f.write(str(visulizationjson))
f.close()
def get_confusion_matrix(self,df):
setOfyTrue = set(df['actual'])
unqClassLst = list(setOfyTrue)
if(str(self.labelMaps) != '{}'):
inv_mapping_dict = {v: k for k, v in self.labelMaps.items()}
unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict)
unqClassLst2 = list(unqClassLst2)
else:
unqClassLst2 = unqClassLst
indexName = []
columnName = []
for item in unqClassLst2:
indexName.append("act:"+str(item))
columnName.append("pre:"+str(item))
result = pd.DataFrame(confusion_matrix(df['actual'], df['predict'], labels = unqClassLst),index = indexName, columns = columnName)
resultjson = result.to_json(orient='index')
return(resultjson)
def DistributionFinder(self,data):
try:
distributionName =""
sse =0.0
KStestStatic=0.0
dataType=""
if(data.dtype == "float64"):
dataType ="Continuous"
elif(data.dtype =="int" or data.dtype =="int64"):
dataType="Discrete"
if(dataType == "Discrete"):
distributions= [st.bernoulli,st.binom,st.geom,st.nbinom,st.poisson]
index, counts = np.unique(abs(data.astype(int)),return_counts=True)
if(len(index)>=2):
best_sse = np.inf
y1=[]
total=sum(counts)
mean=float(sum(index*counts))/total
variance=float((sum(index**2*counts) -total*mean**2))/(total-1)
dispersion=mean/float(variance)
theta=1/float(dispersion)
r=mean*(float(theta)/1-theta)
for j in counts:
y1.append(float(j)/total)
pmf1=st.bernoulli.pmf(index,mean)
pmf2=st.binom.pmf(index,len(index),p=mean/len(index))
pmf3=st.geom.pmf(index,1/float(1+mean))
pmf4=st.nbinom.pmf(index,mean,r)
pmf5=st.poisson.pmf(index,mean)
sse1 = np.sum(np.power(y1 - pmf1, 2.0))
sse2 = np.sum(np.power(y1 - pmf2, 2.0))
sse3 = np.sum(np.power(y1 - pmf3, 2.0))
sse4 = np.sum(np.power(y1 - pmf4, 2.0))
sse5 = np.sum(np.power(y1- pmf5, 2.0))
sselist=[sse1,sse2,sse3,sse4,sse5]
for i in range(0,len(sselist)):
if best_sse > sselist[i] > 0:
best_distribution = distributions[i].name
best_sse = sselist[i]
elif (len(index) == 1):
best_distribution = "Constant Data-No Distribution"
best_sse = 0.0
distributionName =best_distribution
sse=best_sse
elif(dataType == "Continuous"):
distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]
best_distribution = st.norm.name
best_sse = np.inf
datamin=data.min()
datamax=data.max()
nrange=datamax-datamin
y, x = np.histogram(data.astype(float), bins='auto', density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
for distribution in distributions:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
params = distribution.fit(data.astype(float))
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF and error with fit in distribution
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
if(best_sse >sse > 0):
best_distribution = distribution.name
best_sse = sse
distributionName =best_distribution
sse=best_sse
except:
response = str(sys.exc_info()[0])
message='Job has Failed'+response
print(message)
return distributionName,sse
|
local_pipeline.py | import docker
import json
import logging
def read_json(file_path):
data = None
with open(file_path,'r') as f:
data = json.load(f)
return data
def run_pipeline(inputconfig):
inputconfig = json.loads(inputconfig)
logfilepath = inputconfig['logfilepath']
logging.basicConfig(level=logging.INFO,filename =logfilepath)
usecasename = inputconfig['usecase']
logging.info("UseCaseName :"+str(usecasename))
version = inputconfig['version']
logging.info("version :"+str(version))
config = inputconfig['dockerlist']
persistancevolume = inputconfig['persistancevolume']
logging.info("PersistanceVolume :"+str(persistancevolume))
datasetpath = inputconfig['datasetpath']
logging.info("DataSet Path :"+str(datasetpath))
config = read_json(config)
client = docker.from_env()
inputconfig = {'modelName':usecasename,'modelVersion':str(version),'dataLocation':datasetpath}
inputconfig = json.dumps(inputconfig)
inputconfig = inputconfig.replace('"', '\\"')
logging.info("===== Model Monitoring Container Start =====")
outputStr = client.containers.run(config['ModelMonitoring'],'python code.py -i'+datasetpath,volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('ModelMonitoring: '+str(outputStr))
print('ModelMonitoring: '+str(outputStr))
logging.info("===== ModelMonitoring Stop =====")
logging.info("===== Data Ingestion Container Start =====")
outputStr = client.containers.run(config['DataIngestion'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('DataIngestion: '+str(outputStr))
print('DataIngestion: '+str(outputStr))
logging.info("===== Data Ingestion Container Stop =====")
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
status = decoded_data['Status']
if status != 'Success':
output = {'Status':'Error','Msg':'Data Ingestion Fails'}
logging.info("===== Transformation Container Start =====")
outputStr = client.containers.run(config['DataTransformation'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('Data Transformations: '+str(outputStr))
print('Data Transformations: '+str(outputStr))
logging.info("===== Transformation Container Done =====")
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
status = decoded_data['Status']
if status != 'Success':
output = {'Status':'Error','Msg':'Data Transformations Fails'}
logging.info("===== Feature Engineering Container Start =====")
outputStr = client.containers.run(config['FeatureEngineering'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('FeatureEngineering: '+str(outputStr))
print('FeatureEngineering: '+str(outputStr))
logging.info("===== Feature Engineering Container Done =====")
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
status = decoded_data['Status']
modeltraining = config['ModelTraining']
for mt in modeltraining:
logging.info("===== Training Container Start =====")
outputStr = client.containers.run(mt['Training'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('ModelTraining: '+str(outputStr))
print('ModelTraining: '+str(outputStr))
logging.info("===== Training Container Done =====")
outputStr = outputStr.strip()
try:
decoded_data = json.loads(outputStr)
status = decoded_data['Status']
except Exception as inst:
logging.info(inst)
logging.info("===== Model Registry Start =====")
outputStr = client.containers.run(config['ModelRegistry'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('ModelRegistry: '+str(outputStr))
print('ModelRegistry: '+str(outputStr))
logging.info("===== ModelRegistry Done =====")
logging.info("===== ModelServing Start =====")
outputStr = client.containers.run(config['ModelServing'],'python code.py',volumes=[persistancevolume+':/aion'])
outputStr = outputStr.decode('utf-8')
logging.info('Prediction: '+str(outputStr))
print('Prediction: '+str(outputStr))
logging.info("===== ModelServing Done =====") |
build_container.py | import os
import shutil
import sys
import subprocess
from os.path import expanduser
import platform
import json
def createDockerImage(model_name,model_version,module,folderpath):
command = 'docker pull python:3.8-slim-buster'
os.system(command);
subprocess.check_call(["docker", "build", "-t",module+'_'+model_name.lower()+":"+model_version,"."], cwd=folderpath)
def local_docker_build(config):
print(config)
config = json.loads(config)
model_name = config['usecase']
model_version = config['version']
mlaac__code_path = config['mlacPath']
docker_images = {}
docker_images['ModelMonitoring'] = 'modelmonitoring'+'_'+model_name.lower()+':'+model_version
dataset_addr = os.path.join(mlaac__code_path,'ModelMonitoring')
createDockerImage(model_name,model_version,'modelmonitoring',dataset_addr)
docker_images['DataIngestion'] = 'dataingestion'+'_'+model_name.lower()+':'+model_version
dataset_addr = os.path.join(mlaac__code_path,'DataIngestion')
createDockerImage(model_name,model_version,'dataingestion',dataset_addr)
transformer_addr = os.path.join(mlaac__code_path,'DataTransformation')
docker_images['DataTransformation'] = 'datatransformation'+'_'+model_name.lower()+':'+model_version
createDockerImage(model_name,model_version,'datatransformation',transformer_addr)
featureengineering_addr = os.path.join(mlaac__code_path,'FeatureEngineering')
docker_images['FeatureEngineering'] = 'featureengineering'+'_'+model_name.lower()+':'+model_version
createDockerImage(model_name,model_version,'featureengineering',featureengineering_addr)
from os import listdir
arr = [filename for filename in os.listdir(mlaac__code_path) if filename.startswith("ModelTraining")]
docker_training_images = []
for x in arr:
dockertraing={}
dockertraing['Training'] = str(x).lower()+'_'+model_name.lower()+':'+model_version
docker_training_images.append(dockertraing)
training_addri = os.path.join(mlaac__code_path,x)
createDockerImage(model_name,model_version,str(x).lower(),training_addri)
docker_images['ModelTraining'] = docker_training_images
docker_images['ModelRegistry'] = 'modelregistry'+'_'+model_name.lower()+':'+model_version
deploy_addr = os.path.join(mlaac__code_path,'ModelRegistry')
createDockerImage(model_name,model_version,'modelregistry',deploy_addr)
docker_images['ModelServing'] = 'modelserving'+'_'+model_name.lower()+':'+model_version
deploy_addr = os.path.join(mlaac__code_path,'ModelServing')
createDockerImage(model_name,model_version,'modelserving',deploy_addr)
outputjsonFile = os.path.join(mlaac__code_path,'dockerlist.json')
with open(outputjsonFile, 'w') as f:
json.dump(docker_images, f)
f.close()
output = {'Status':'Success','Msg':outputjsonFile}
output = json.dumps(output)
print("aion_build_container:",output) |
git_upload.py | import os
import sys
import json
from pathlib import Path
import subprocess
import shutil
import argparse
def create_and_save_yaml(git_storage_path, container_label,usecasepath):
file_name_prefix = 'gh-acr-'
yaml_file = f"""\
name: gh-acr-{container_label}
on:
push:
branches: main
paths: {container_label}/**
workflow_dispatch:
jobs:
gh-acr-build-push:
runs-on: ubuntu-latest
steps:
- name: 'checkout action'
uses: actions/checkout@main
- name: 'azure login'
uses: azure/login@v1
with:
creds: ${{{{ secrets.AZURE_CREDENTIALS }}}}
- name: 'build and push image'
uses: azure/docker-login@v1
with:
login-server: ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}
username: ${{{{ secrets.REGISTRY_USERNAME }}}}
password: ${{{{ secrets.REGISTRY_PASSWORD }}}}
- run: |
docker build ./{container_label}/ModelMonitoring -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelmonitoring:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelmonitoring:{container_label}
docker build ./{container_label}/DataIngestion -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/dataingestion:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/dataingestion:{container_label}
docker build ./{container_label}/DataTransformation -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/datatransformation:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/datatransformation:{container_label}
docker build ./{container_label}/FeatureEngineering -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/featureengineering:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/featureengineering:{container_label}
docker build ./{container_label}/ModelRegistry -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelregistry:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelregistry:{container_label}
docker build ./{container_label}/ModelServing -t ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelserving:{container_label}
docker push ${{{{ secrets.REGISTRY_LOGIN_SERVER }}}}/modelserving:{container_label}
"""
arr = [filename for filename in os.listdir(usecasepath) if filename.startswith("ModelTraining")]
for x in arr:
yaml_file+=' docker build ./'+container_label+'/'+x+' -t ${{ secrets.REGISTRY_LOGIN_SERVER }}/'+x.lower()+':'+container_label
yaml_file+='\n'
yaml_file+=' docker push ${{ secrets.REGISTRY_LOGIN_SERVER }}/'+x.lower()+':'+container_label
yaml_file+='\n'
with open(Path(git_storage_path)/(file_name_prefix + container_label + '.yaml'), 'w') as f:
f.write(yaml_file)
def run_cmd(cmd):
try:
subprocess.check_output(cmd, stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
if e.stderr:
if isinstance(e.stderr, bytes):
err_msg = e.stderr.decode(sys.getfilesystemencoding())
else:
err_msg = e.stderr
elif e.output:
if isinstance(e.output, bytes):
err_msg = e.output.decode(sys.getfilesystemencoding())
else:
err_msg = e.output
else:
err_msg = str(e)
return False, err_msg
return True, ""
def validate_config(config):
non_null_keys = ['url','username', 'token', 'location', 'gitFolderLocation', 'email', 'modelName']
missing_keys = [k for k in non_null_keys if k not in config.keys()]
if missing_keys:
raise ValueError(f"following fields are missing in config file: {missing_keys}")
for k,v in config.items():
if k in non_null_keys and not v:
raise ValueError(f"Please provide value for '{k}' in config file.")
def upload(config):
validate_config(config)
url_type = config.get('url_type','https')
if url_type == 'https':
https_str = "https://"
url = https_str + config['username'] + ":" + config['token'] + "@" + config['url'][len(https_str):]
else:
url = config['url']
model_location = Path(config['location'])
git_folder_location = Path(config['gitFolderLocation'])
git_folder_location.mkdir(parents=True, exist_ok=True)
(git_folder_location/'.github'/'workflows').mkdir(parents=True, exist_ok=True)
if not model_location.exists():
raise ValueError('Trained model data not found')
os.chdir(str(git_folder_location))
(git_folder_location/config['modelName']).mkdir(parents=True, exist_ok=True)
shutil.copytree(model_location, git_folder_location/config['modelName'], dirs_exist_ok=True)
create_and_save_yaml((git_folder_location/'.github'/'workflows'), config['modelName'],config['location'])
if (Path(git_folder_location)/'.git').exists():
first_upload = False
else:
first_upload = True
if first_upload:
cmd = ['git','init']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','config','user.name',config['username']]
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','config','user.email',config['email']]
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','add', '-A']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','commit','-m',f"commit {config['modelName']}"]
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','branch','-M','main']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
if first_upload:
cmd = ['git','remote','add','origin', url]
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
cmd = ['git','push','-f','-u','origin', 'main']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
else:
cmd = ['git','push']
status, msg = run_cmd(cmd)
if not status:
raise ValueError(msg)
return json.dumps({'Status':'SUCCESS'})
if __name__ == '__main__':
try:
if shutil.which('git') is None:
raise ValueError("git is not installed on this system")
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help='Config file location or as a string')
args = parser.parse_args()
if Path(args.config).is_file() and Path(args.config).suffix == '.json':
with open(args.config,'r') as f:
config = json.load(f)
else:
config = json.loads(args.config)
print(upload(config))
except Exception as e:
status = {'Status':'Failure','msg':str(e)}
print(json.dumps(status)) |
__init__.py | '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
''' |
kafka_consumer.py | from kafka import KafkaConsumer
from json import loads
import pandas as pd
import json
import os,sys
import time
import multiprocessing
from os.path import expanduser
import platform
import datetime
modelDetails = {}
class Process(multiprocessing.Process):
def __init__(self, modelSignature,jsonData,predictedData,modelpath):
super(Process, self).__init__()
self.config = jsonData
self.modelSignature = modelSignature
self.data = predictedData
self.modelpath = modelpath
def run(self):
#data = pd.json_normalize(self.data)
minotoringService = self.config['minotoringService']['url']
trainingdatalocation = self.config['trainingDataLocation'][self.modelSignature]
#filetimestamp = 'AION_'+str(int(time.time()))+'.csv'
#data.to_csv(dataFile, index=False)
inputFieldsJson = {"trainingDataLocation":trainingdatalocation,"currentDataLocation":self.data}
inputFieldsJson = json.dumps(inputFieldsJson)
ser_url = minotoringService+self.modelSignature+'/monitoring'
driftTime = datetime.datetime.now()
import requests
try:
response = requests.post(ser_url, data=inputFieldsJson,headers={"Content-Type":"application/json",})
outputStr=response.content
outputStr = outputStr.decode('utf-8')
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
print(decoded_data)
status = decoded_data['status']
msg = decoded_data['data']
except Exception as inst:
if 'Failed to establish a new connection' in str(inst):
status = 'Fail'
msg = 'AION Service needs to be started'
else:
status = 'Fail'
msg = 'Error during Drift Analysis'
statusFile = os.path.join(self.modelpath,self.modelSignature+'_status.csv')
df = pd.DataFrame(columns = ['dateTime', 'status', 'msg'])
df = df.append({'dateTime' : driftTime, 'status' : status, 'msg' : msg},ignore_index = True)
print(df)
if (os.path.exists(statusFile)):
df.to_csv(statusFile, mode='a', header=False,index=False)
else:
df.to_csv(statusFile, header=True,index=False)
def launch_kafka_consumer():
from appbe.dataPath import DATA_DIR
configfile = os.path.join(os.path.dirname(__file__),'..','config','kafkaConfig.conf')
with open(configfile,'r',encoding='utf-8') as f:
jsonData = json.load(f)
f.close()
kafkaIP=jsonData['kafkaCluster']['ip']
kafkaport = jsonData['kafkaCluster']['port']
topic = jsonData['kafkaCluster']['topic']
kafkaurl = kafkaIP+':'+kafkaport
if jsonData['database']['csv'] == 'True':
database = 'csv'
elif jsonData['database']['mySql'] == 'True':
database = 'mySql'
else:
database = 'csv'
kafkaPath = os.path.join(DATA_DIR,'kafka')
if not (os.path.exists(kafkaPath)):
try:
os.makedirs(kafkaPath)
except OSError as e:
pass
consumer = KafkaConsumer(topic,bootstrap_servers=[kafkaurl],auto_offset_reset='earliest',enable_auto_commit=True,group_id='my-group',value_deserializer=lambda x: loads(x.decode('utf-8')))
for message in consumer:
message = message.value
data = message['data']
data = pd.json_normalize(data)
modelname = message['usecasename']
version = message['version']
modelSignature = modelname+'_'+str(version)
modelpath = os.path.join(kafkaPath,modelSignature)
try:
os.makedirs(modelpath)
except OSError as e:
pass
secondsSinceEpoch = time.time()
if modelSignature not in modelDetails:
modelDetails[modelSignature] = {}
modelDetails[modelSignature]['startTime'] = secondsSinceEpoch
if database == 'csv':
csvfile = os.path.join(modelpath,modelSignature+'.csv')
if (os.path.exists(csvfile)):
data.to_csv(csvfile, mode='a', header=False,index=False)
else:
data.to_csv(csvfile, header=True,index=False)
modelTimeFrame = jsonData['timeFrame'][modelSignature]
currentseconds = time.time()
print(currentseconds - modelDetails[modelSignature]['startTime'])
if (currentseconds - modelDetails[modelSignature]['startTime']) >= float(modelTimeFrame):
csv_path = os.path.join(modelpath,modelSignature+'.csv')
#predictedData = pd.read_csv(csv_path)
##predictedData = predictedData.to_json(orient="records")
index = Process(modelSignature,jsonData,csv_path,modelpath)
index.start()
modelDetails[modelSignature]['startTime'] = secondsSinceEpoch
|