id (int64, 0-190k) | prompt (string, 21-13.4M chars) | docstring (string, 1-12k chars, ⌀ = null)
---|---|---|
402 | import argparse
import speakeasy
import logging
def get_logger():
"""
Get the default logger for speakeasy
"""
logger = logging.getLogger('emu_exe')
if not logger.handlers:
sh = logging.StreamHandler()
logger.addHandler(sh)
logger.setLevel(logging.INFO)
return logger
The provided code snippet includes necessary dependencies for implementing the `hook_ntreadfile` function. Write a Python function `def hook_ntreadfile(emu, api_name, func, params)` to solve the following problem:
API hook that is installed to intercept NtReadFile calls as an example Args: api_name: The full name including module of the hooked API func: the real emulated function provided by the framework Users can call this by passing in "params" whenever they choose params: the arguments passed to the function
Here is the function:
def hook_ntreadfile(emu, api_name, func, params):
"""
API hook that is installed to intercept NtReadFile calls as an example
Args:
api_name: The full name including module of the hooked API
func: the real emulated function provided by the framework
Users can call this by passing in "params" whenever they choose
params: the arguments passed to the function
"""
# Call the NtReadFile function
rv = func(params)
logger = get_logger()
hnd, evt, apcf, apcc, ios, buf, size, offset, key = params
# Read the buffer containing the file data
data = emu.mem_read(buf, size)
logger.log(logging.INFO, data)
# Write something to the buffer instead
emu.mem_write(buf, b'A' * size)
return rv | API hook that is installed to intercept NtReadFile calls as an example Args: api_name: The full name including module of the hooked API func: the real emulated function provided by the framework Users can call this by passing in "params" whenever they choose params: the arguments passed to the function |
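A minimal usage sketch for the hook above, assuming the standard Speakeasy API (Speakeasy(), load_module, add_api_hook, run_module); the sample path is a placeholder and the exact signatures should be checked against the installed speakeasy version:
se = speakeasy.Speakeasy(logger=get_logger())
module = se.load_module('/path/to/sample.exe')           # placeholder path
se.add_api_hook(hook_ntreadfile, 'ntdll', 'NtReadFile')  # intercept NtReadFile calls
se.run_module(module)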
403 | import argparse
import speakeasy
import logging
def get_logger():
"""
Get the default logger for speakeasy
"""
logger = logging.getLogger('emu_dll')
if not logger.handlers:
sh = logging.StreamHandler()
logger.addHandler(sh)
logger.setLevel(logging.INFO)
return logger
The provided code snippet includes necessary dependencies for implementing the `hook_messagebox` function. Write a Python function `def hook_messagebox(emu, api_name, func, params)` to solve the following problem:
API hook that is installed to intercept MessageBox calls as an example Args: api_name: The full name including module of the hooked API func: the real emulated function provided by the framework Users can call this by passing in "params" whenever they choose params: the arguments passed to the function
Here is the function:
def hook_messagebox(emu, api_name, func, params):
"""
API hook that is installed to intercept MessageBox calls as an example
Args:
api_name: The full name including module of the hooked API
func: the real emulated function provided by the framework
Users can call this by passing in "params" whenever they choose
params: the arguments passed to the function
"""
# Call the MessageBox function and print its text string data
rv = func(params)
logger = get_logger()
hWnd, lpText, lpCaption, uType = params
msg = '%s text: %s' % (api_name, lpText)
logger.log(logging.INFO, msg)
# Lets read where the stack pointer is
logger.log(logging.INFO, 'Stack pointer is at: 0x%x' % (emu.reg_read('esp')))
return rv | API hook that is installed to intercept MessageBox calls as an example Args: api_name: The full name including module of the hooked API func: the real emulated function provided by the framework Users can call this by passing in "params" whenever they choose params: the arguments passed to the function |
404 | import argparse
import speakeasy
import logging
def get_logger():
"""
Get the default logger for speakeasy
"""
logger = logging.getLogger('emu_dll')
if not logger.handlers:
sh = logging.StreamHandler()
logger.addHandler(sh)
logger.setLevel(logging.INFO)
return logger
The provided code snippet includes necessary dependencies for implementing the `hook_mem_write` function. Write a Python function `def hook_mem_write(emu, access, address, size, value, ctx)` to solve the following problem:
Hook that is called whenever memory is written to Args: access: memory access requested address: Memory address that is being written to size: Size of the data being written value: data that is being written to "address"
Here is the function:
def hook_mem_write(emu, access, address, size, value, ctx):
"""
Hook that is called whenever memory is written to
Args:
access: memory access requested
address: Memory address that is being written to
size: Size of the data being written
value: data that is being written to "address"
"""
# For a quick example, lets just log writes that occur to the stack
for mm in emu.get_mem_maps():
if mm.tag and mm.tag.startswith('emu.stack'):
start = mm.get_base()
end = start + mm.get_size()
if start < address < end:
logger = get_logger()
# Get the assembly instruction that did the write
mnem, op, instr = emu.disasm(emu.reg_read('eip'), 0x20)
msg = 'Stack written to: instr: %s addr:0x%x' % (instr, address)
logger.log(logging.INFO, msg)
return | Hook that is called whenever memory is written to Args: access: memory access requested address: Memory address that is being written to size: Size of the data being written value: data that is being written to "address" |
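A hedged registration sketch for the memory-write hook, assuming Speakeasy exposes an add_mem_write_hook method that accepts a callback with this signature (verify against the installed version); the DLL path is a placeholder:
se = speakeasy.Speakeasy(logger=get_logger())
module = se.load_module('/path/to/sample.dll')   # placeholder path
se.add_mem_write_hook(hook_mem_write)            # invoked on every emulated memory write
se.run_module(module, all_entrypoints=True)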
405 | import pathlib
import re
import typing as t
from functools import lru_cache
import setuptools
VERSION_FILE = DEEPCHECKS_DIR / "VERSION"
def is_correct_version_string(value: str) -> bool:
def get_version_string() -> str:
if not (VERSION_FILE.exists() and VERSION_FILE.is_file()):
raise RuntimeError(
"Version file does not exist! "
f"(filepath: {str(VERSION_FILE)})")
else:
version = VERSION_FILE.open("r").readline()
if not is_correct_version_string(version):
raise RuntimeError(
"Incorrect version string! "
f"(filepath: {str(VERSION_FILE)})"
)
return version | null |
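Note that `is_correct_version_string` appears above only as a stub; a minimal, assumed implementation (not necessarily the one deepchecks uses) could validate a semver-like string with a regex:
def is_correct_version_string(value: str) -> bool:
    # Accept strings such as "1.2.3" or "1.2.3.dev1"; hypothetical pattern, adjust as needed.
    return re.fullmatch(r"\d+\.\d+\.\d+(?:[.\-]?(?:dev|a|b|rc)\d*)?", value.strip()) is not None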
406 | import pathlib
import re
import typing as t
from functools import lru_cache
import setuptools
DESCRIPTION_FILE = DEEPCHECKS_DIR / "DESCRIPTION.rst"
def get_description() -> t.Tuple[str, str]:
if not (DESCRIPTION_FILE.exists() and DESCRIPTION_FILE.is_file()):
raise RuntimeError(
"DESCRIPTION.rst file does not exist! "
f"(filepath: {str(DESCRIPTION_FILE)})"
)
else:
return (
"Package for validating your machine learning model and data",
DESCRIPTION_FILE.open("r", encoding="utf8").read()
) | null |
407 | import pathlib
import re
import typing as t
from functools import lru_cache
import setuptools
DEEPCHECKS_DIR = SETUP_MODULE.parent
def read_requirements_file(path):
dependencies = []
dependencies_links = []
for line in path.open("r").readlines():
if "-f" in line or "--find-links" in line:
dependencies_links.append(
line
.replace("-f", "")
.replace("--find-links", "")
.strip()
)
else:
dependencies.append(line)
return dependencies, dependencies_links
def read_requirements() -> t.Dict[str,t.List[str]]:
requirements_folder = DEEPCHECKS_DIR / "requirements"
if not (requirements_folder.exists() and requirements_folder.is_dir()):
raise RuntimeError(
"Cannot find folder with requirements files."
f"(path: {str(requirements_folder)})"
)
else:
main, main_dep_links = read_requirements_file(requirements_folder / "requirements.txt")
vision, vision_dep_links = read_requirements_file(requirements_folder / "vision-requirements.txt")
nlp, nlp_dep_links = read_requirements_file(requirements_folder / "nlp-requirements.txt")
nlp_properties, nlp_properties_dep_links = \
read_requirements_file(requirements_folder / "nlp-prop-requirements.txt")
return {
"dependency_links": main_dep_links + vision_dep_links + nlp_dep_links + nlp_properties_dep_links,
"main": main,
"vision": vision,
"nlp": nlp,
"nlp-properties": nlp_properties
} | null |
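A hedged sketch of how the requirements dict is typically fed to setuptools; the package name is assumed from context and the remaining setup() arguments are omitted:
requirements = read_requirements()
setuptools.setup(
    name="deepchecks",                      # assumed package name
    install_requires=requirements["main"],
    extras_require={
        "vision": requirements["vision"],
        "nlp": requirements["nlp"],
        "nlp-properties": requirements["nlp-properties"],
    },
    dependency_links=requirements["dependency_links"],
)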
408 | from deepchecks.tabular.checks import DatasetsSizeComparison
import pandas as pd
from deepchecks.tabular import Dataset
from deepchecks.tabular.suites import train_test_validation
import pandas as pd
from deepchecks.tabular import Dataset
from deepchecks.tabular.checks import DatasetsSizeComparison
from deepchecks.core import ConditionResult
low_threshold = 0.4
high_threshold = 0.6
from deepchecks.tabular import Suite
from deepchecks.core import ConditionCategory, ConditionResult
low_threshold = 0.3
high_threshold = 0.7
def custom_condition(value: dict, low=low_threshold, high=high_threshold):
ratio = value['Test'] / value['Train']
if low <= ratio <= high:
return ConditionResult(ConditionCategory.PASS)
else:
# Note: if you don't care about the extra info, you can directly return a boolean
return ConditionResult(ConditionCategory.FAIL, f'Test-Train ratio is {ratio:.2}') | null |
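A short usage sketch: the custom condition is attached to the check with add_condition; constructing the train/test Dataset objects is omitted here.
check = DatasetsSizeComparison().add_condition(
    f'Test-Train size ratio is between {low_threshold} and {high_threshold}',
    custom_condition
)
# result = check.run(train_dataset, test_dataset)  # Dataset objects built elsewhere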
409 | from deepchecks.tabular.checks import DatasetsSizeComparison
import pandas as pd
from deepchecks.tabular import Dataset
from deepchecks.tabular.suites import train_test_validation
import pandas as pd
from deepchecks.tabular import Dataset
from deepchecks.tabular.checks import DatasetsSizeComparison
from deepchecks.core import ConditionResult
low_threshold = 0.4
high_threshold = 0.6
from deepchecks.tabular import Suite
from deepchecks.core import ConditionCategory, ConditionResult
low_threshold = 0.3
high_threshold = 0.7
def custom_condition(value: dict):
ratio = value['Test'] / value['Train']
if low_threshold <= ratio <= high_threshold:
return ConditionResult(ConditionCategory.PASS)
elif ratio < low_threshold:
return ConditionResult(ConditionCategory.FAIL, f'Test-Train ratio is {ratio:.2}', ConditionCategory.FAIL)
else:
return ConditionResult(ConditionCategory.FAIL, f'Test-Train ratio is {ratio:.2}', ConditionCategory.WARN) | null |
410 | import warnings
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from deepchecks.tabular import Dataset
from deepchecks.tabular.checks import CalibrationScore
from deepchecks.tabular.datasets.classification import adult
def custom_formatwarning(msg, *args, **kwargs):
# ignore everything except the message
return str(msg) + '\n' | null |
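Typical usage of the formatter above with the standard library warnings module: assigning it to warnings.formatwarning makes warnings print only their message text.
warnings.formatwarning = custom_formatwarning
warnings.warn('only this message will be shown')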
411 | import warnings
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from deepchecks.tabular import Dataset
from deepchecks.tabular.checks import RocReport
def custom_formatwarning(msg, *args, **kwargs):
return str(msg) + '\n' | null |
412 | import numpy as np
import pandas as pd
from deepchecks.tabular.datasets.classification import adult
def insert_new_values_types(col: pd.Series, ratio_to_replace: float, values_list):
col = col.to_numpy().astype(object)
indices_to_replace = np.random.choice(range(len(col)), int(len(col) * ratio_to_replace), replace=False)
new_values = np.random.choice(values_list, len(indices_to_replace))
col[indices_to_replace] = new_values
return col
from deepchecks.tabular import Dataset
from deepchecks.tabular.checks import MixedDataTypes
def insert_string_types(col: pd.Series, ratio_to_replace):
return insert_new_values_types(col, ratio_to_replace, ['a', 'b', 'c']) | null |
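A hedged usage sketch: corrupt a numeric column of the adult test split with string values and run the MixedDataTypes check on it. The column and label names follow the adult dataset and are assumptions to verify.
_, test_df = adult.load_data(data_format='Dataframe')
test_df['age'] = insert_string_types(test_df['age'], 0.01)   # replace 1% of values with strings
result = MixedDataTypes().run(Dataset(test_df, label='income'))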
413 | import numpy as np
import pandas as pd
from deepchecks.tabular.datasets.classification import adult
def insert_new_values_types(col: pd.Series, ratio_to_replace: float, values_list):
col = col.to_numpy().astype(object)
indices_to_replace = np.random.choice(range(len(col)), int(len(col) * ratio_to_replace), replace=False)
new_values = np.random.choice(values_list, len(indices_to_replace))
col[indices_to_replace] = new_values
return col
from deepchecks.tabular import Dataset
from deepchecks.tabular.checks import MixedDataTypes
def insert_numeric_string_types(col: pd.Series, ratio_to_replace):
return insert_new_values_types(col, ratio_to_replace, ['1.0', '1', '10394.33']) | null |
414 | import numpy as np
import pandas as pd
from deepchecks.tabular.datasets.classification import adult
def insert_new_values_types(col: pd.Series, ratio_to_replace: float, values_list):
from deepchecks.tabular import Dataset
from deepchecks.tabular.checks import MixedDataTypes
def insert_number_types(col: pd.Series, ratio_to_replace):
return insert_new_values_types(col, ratio_to_replace, [66, 99.9]) | null |
415 | from deepchecks.vision.checks import PredictionDrift
from deepchecks.vision.datasets.classification.mnist_torch import load_dataset
from deepchecks.vision.checks import ClassPerformance
import numpy as np
import torch
np.random.seed(42)
from deepchecks.vision.datasets.detection.coco_torch import load_dataset
def generate_collate_fn_with_label_drift(collate_fn):
def collate_fn_with_label_drift(batch):
batch_dict = collate_fn(batch)
images = batch_dict['images']
labels = batch_dict['labels']
for i in range(len(images)):
image, label = images[i], labels[i]
if label == 0:
if np.random.randint(5) != 0:
batch_dict['labels'][i] = 1
# In 9/10 cases, the prediction vector will change to match the label
if np.random.randint(10) != 0:
batch_dict['predictions'][i] = torch.tensor([0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
return batch_dict
return collate_fn_with_label_drift | null |
416 | from deepchecks.tabular import datasets
import pandas as pd
from deepchecks.tabular import Dataset
from deepchecks.tabular.suites import data_integrity
from deepchecks.tabular.checks import IsSingleValue, DataDuplicates
def add_dirty_data(df):
# change strings
df.loc[df[df['type'] == 'organic'].sample(frac=0.18).index,'type'] = 'Organic'
df.loc[df[df['type'] == 'organic'].sample(frac=0.01).index,'type'] = 'ORGANIC'
# add duplicates
df = pd.concat([df, df.sample(frac=0.156)], axis=0, ignore_index=True)
# add column with single value
df['Is Ripe'] = True
return df | null |
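A hedged usage sketch mirroring the deepchecks avocado example (the dataset module, column and label names are assumptions to verify): load the data, dirty it, and run the integrity suite.
data = datasets.regression.avocado.load_data(data_format='Dataframe', as_train_test=False)
dirty_df = add_dirty_data(data)
ds = Dataset(dirty_df, cat_features=['type'], datetime_name='Date', label='AveragePrice')
result = data_integrity().run(ds)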
417 | import functools
import inspect
import os
import pathlib
import sys
import typing as t
import re
from subprocess import check_output
import plotly.io as pio
from plotly.io._sg_scraper import plotly_sg_scraper
import deepchecks
from deepchecks import vision
from deepchecks.utils.strings import to_snake_case
os.environ['DEEPCHECKS_DISABLE_LATEST'] = 'true'
if os.environ.get("GITHUB_REF_NAME"):
if os.environ.get("GITHUB_REF_NAME") == 'main':
version = 'dev'
else:
# Taking the major and minor version from the branch name
version_match: re.Match = re.match(r'\d+(?:\.\d+)', os.environ.get("GITHUB_REF_NAME"))
if version_match is not None:
version = version_match.group(0)
def path_exists(path: str):
return os.path.exists(path) | null |
418 | import functools
import inspect
import os
import pathlib
import sys
import typing as t
import re
from subprocess import check_output
import plotly.io as pio
from plotly.io._sg_scraper import plotly_sg_scraper
import deepchecks
from deepchecks import vision
from deepchecks.utils.strings import to_snake_case
os.environ['DEEPCHECKS_DISABLE_LATEST'] = 'true'
if os.environ.get("GITHUB_REF_NAME"):
if os.environ.get("GITHUB_REF_NAME") == 'main':
version = 'dev'
else:
# Taking the major and minor version from the branch name
version_match: re.Match = re.match(r'\d+(?:\.\d+)', os.environ.get("GITHUB_REF_NAME"))
if version_match is not None:
version = version_match.group(0)
def getswd(pth: str):
return os.getcwd() | null |
419 | import functools
import inspect
import os
import pathlib
import sys
import typing as t
import re
from subprocess import check_output
import plotly.io as pio
from plotly.io._sg_scraper import plotly_sg_scraper
import deepchecks
from deepchecks import vision
from deepchecks.utils.strings import to_snake_case
os.environ['DEEPCHECKS_DISABLE_LATEST'] = 'true'
if os.environ.get("GITHUB_REF_NAME"):
if os.environ.get("GITHUB_REF_NAME") == 'main':
version = 'dev'
else:
# Taking the major and minor version from the branch name
version_match: re.Match = re.match(r'\d+(?:\.\d+)', os.environ.get("GITHUB_REF_NAME"))
if version_match is not None:
version = version_match.group(0)
def _import_object_from_name(module_name, fullname):
_top_modules = ['deepchecks']
def _get_source_relative_path(source_abs_path):
def linkcode_resolve(domain, info):
if domain != 'py' or not info['module']:
return None
# Import the object from module path
obj = _import_object_from_name(info['module'], info['fullname'])
# If it's not defined in the internal module, return None.
mod = inspect.getmodule(obj)
if mod is None:
return None
if not mod.__name__.split('.')[0] in _top_modules:
return None
# Get the source file name and line number at which obj is defined.
try:
filename = inspect.getsourcefile(obj)
except TypeError:
# obj is not a module, class, function, ..etc.
return None
except AttributeError:
return None
# inspect can return None for cython objects
if filename is None:
return None
# Get the source line number
_, linenum = inspect.getsourcelines(obj)
assert isinstance(linenum, int)
filename = os.path.realpath(filename)
relpath = _get_source_relative_path(filename)
return f'https://github.com/deepchecks/deepchecks/blob/{tag}/{relpath}#L{linenum}' | null |
420 | import functools
import inspect
import os
import pathlib
import sys
import typing as t
import re
from subprocess import check_output
import plotly.io as pio
from plotly.io._sg_scraper import plotly_sg_scraper
import deepchecks
from deepchecks import vision
from deepchecks.utils.strings import to_snake_case
def get_check_example_api_reference(filepath: str) -> t.Optional[str]:
if not (
filepath.startswith("tabular/auto_checks")
or filepath.startswith("vision/auto_checks")
or filepath.startswith("nlp/auto_checks")
):
return ''
notebook_name = snake_case_to_camel_case(
filepath.split("/")[-1][5:]
.replace(".txt", "")
.replace(".ipynb", "")
.replace(".py", "")
)
if filepath.startswith("tabular/auto_checks"):
import deepchecks.tabular.checks
check_clazz = getattr(deepchecks.tabular.checks, notebook_name, None)
elif filepath.startswith("vision/auto_checks"):
import deepchecks.vision.checks
check_clazz = getattr(deepchecks.vision.checks, notebook_name, None)
else:
import deepchecks.nlp.checks
check_clazz = getattr(deepchecks.nlp.checks, notebook_name, None)
if check_clazz is None or not hasattr(check_clazz, "__module__"):
return
clazz_module = ".".join(check_clazz.__module__.split(".")[:-1])
apipath = f"<ul><li><a href='../../../api/generated/{clazz_module}.{notebook_name}.html'>API Reference - {notebook_name}</a></li></ul>"
return apipath
def get_report_issue_url(pagename: str) -> str:
template = (
"https://github.com/{user}/{repo}/issues/new?title={title}&body={body}&labels={labels}"
)
return template.format(
user=GIT["user"],
repo=GIT["repo"],
title="[Docs] Documentation contains a mistake.",
body=f"Package Version: {version};\nPage: {pagename}",
labels="labels=chore/documentation",
)
def setup(app):
def add_custom_routines(app, pagename, templatename, context, doctree):
context["get_report_issue_url"] = get_report_issue_url
context["get_check_example_api_reference"] = get_check_example_api_reference
# make custom routines available within html templates
app.connect("html-page-context", add_custom_routines)
return {
"parallel_read_safe": True,
"parallel_write_safe": True,
} | null |
421 | from deepchecks.vision.datasets.segmentation.segmentation_coco import CocoSegmentationDataset, load_model
model = load_model(pretrained=True)
import torch
import torchvision.transforms.functional as F
from deepchecks.vision.vision_data import BatchOutputFormat
from torch.utils.data import DataLoader
from deepchecks.vision import VisionData
from deepchecks.vision.suites import model_evaluation
The provided code snippet includes necessary dependencies for implementing the `deepchecks_collate_fn` function. Write a Python function `def deepchecks_collate_fn(batch) -> BatchOutputFormat` to solve the following problem:
Return a batch of images, labels and predictions for a batch of data. The expected format is a dictionary with the following keys: 'images', 'labels' and 'predictions', each value is in the deepchecks format for the task. You can also use the BatchOutputFormat class to create the output.
Here is the function:
def deepchecks_collate_fn(batch) -> BatchOutputFormat:
"""Return a batch of images, labels and predictions for a batch of data. The expected format is a dictionary with
the following keys: 'images', 'labels' and 'predictions', each value is in the deepchecks format for the task.
You can also use the BatchOutputFormat class to create the output.
"""
# batch received as iterable of tuples of (image, label) and transformed to tuple of iterables of images and labels:
batch = tuple(zip(*batch))
# images:
images = [tensor.numpy().transpose((1, 2, 0)) for tensor in batch[0]]
#labels:
labels = batch[1]
#predictions:
normalized_batch = [F.normalize(img.unsqueeze(0).float() / 255,
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) for img in batch[0]]
predictions = [model(img)["out"].squeeze(0).detach() for img in normalized_batch]
predictions = [torch.nn.functional.softmax(pred, dim=0) for pred in predictions]
return BatchOutputFormat(images=images, labels=labels, predictions=predictions) | Return a batch of images, labels and predictions for a batch of data. The expected format is a dictionary with the following keys: 'images', 'labels' and 'predictions', each value is in the deepchecks format for the task. You can also use the BatchOutputFormat class to create the output. |
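A hedged wiring sketch: the collate function is passed to a torch DataLoader over a segmentation dataset (construction of coco_train_dataset is omitted and the name is a placeholder), and the loader is wrapped in VisionData with the semantic segmentation task type.
train_loader = DataLoader(coco_train_dataset, batch_size=32, shuffle=False,
                          collate_fn=deepchecks_collate_fn)
train_data = VisionData(batch_loader=train_loader, task_type='semantic_segmentation')
# result = model_evaluation().run(train_data, test_data)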
422 | import os
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
import albumentations as A
from albumentations.pytorch import ToTensorV2
from PIL import Image
import xml.etree.ElementTree as ET
import urllib.request
import zipfile
from functools import partial
from torch import nn
import torchvision
from torchvision.models.detection import _utils as det_utils
from torchvision.models.detection.ssdlite import SSDLiteClassificationHead
def get_untransformed_images(original_images):
"""
Convert a batch of data to images in the expected format. The expected format is an iterable of images,
where each image is a numpy array of shape (height, width, channels). The numbers in the array should be in the
range [0, 255] in a uint8 format.
"""
inp = torch.stack(list(original_images)).cpu().detach().numpy().transpose((0, 2, 3, 1))
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
# Un-normalize the images
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
return inp * 255
def transform_labels_to_cxywh(original_labels):
"""
Convert a batch of data to labels in the expected format. The expected format is an iterator of arrays, each array
corresponding to a sample. Each array element is in a shape of [B, 5], where B is the number of bboxes
in the image, and each bounding box is in the structure of [class_id, x, y, w, h].
"""
label = []
for annotation in original_labels:
if len(annotation["boxes"]):
bbox = torch.stack(annotation["boxes"])
# Convert the Pascal VOC xyxy format to xywh format
bbox[:, 2:] = bbox[:, 2:] - bbox[:, :2]
# The label shape is [class_id, x, y, w, h]
label.append(
torch.concat([torch.stack(annotation["labels"]).reshape((-1, 1)), bbox], dim=1)
)
else:
# If it's an empty image, we need to add an empty label
label.append(torch.tensor([]))
return label
def infer_on_images(original_images):
"""
Returns the predictions for a batch of data. The expected format is an iterator of arrays, each array
corresponding to a sample. Each array element is in a shape of [B, 6], where B is the number of bboxes in the
predictions, and each bounding box is in the structure of [x, y, w, h, score, class_id].
Note that model and device here are global variables, and are defined in the previous code block, as the collate
    function cannot receive arguments other than the batch.
"""
nm_thrs = 0.2
score_thrs = 0.7
imgs = list(img.to(device) for img in original_images)
# Getting the predictions of the model on the batch
with torch.no_grad():
preds = model(imgs)
processed_pred = []
for pred in preds:
# Performing non-maximum suppression on the detections
keep_boxes = torchvision.ops.nms(pred['boxes'], pred['scores'], nm_thrs)
score_filter = pred['scores'][keep_boxes] > score_thrs
# get the filtered result
test_boxes = pred['boxes'][keep_boxes][score_filter].reshape((-1, 4))
test_boxes[:, 2:] = test_boxes[:, 2:] - test_boxes[:, :2] # xyxy to xywh
test_labels = pred['labels'][keep_boxes][score_filter]
test_scores = pred['scores'][keep_boxes][score_filter]
processed_pred.append(
torch.concat([test_boxes, test_scores.reshape((-1, 1)), test_labels.reshape((-1, 1))], dim=1))
return processed_pred
from deepchecks.vision.vision_data import BatchOutputFormat
from deepchecks.vision.vision_data import VisionData
from deepchecks.vision.suites import model_evaluation
The provided code snippet includes necessary dependencies for implementing the `deepchecks_collate_fn` function. Write a Python function `def deepchecks_collate_fn(batch) -> BatchOutputFormat` to solve the following problem:
Return a batch of images, labels and predictions in the deepchecks format.
Here is the function:
def deepchecks_collate_fn(batch) -> BatchOutputFormat:
"""Return a batch of images, labels and predictions in the deepchecks format."""
# batch received as iterable of tuples of (image, label) and transformed to tuple of iterables of images and labels:
batch = tuple(zip(*batch))
images = get_untransformed_images(batch[0])
labels = transform_labels_to_cxywh(batch[1])
predictions = infer_on_images(batch[0])
return BatchOutputFormat(images=images, labels=labels, predictions=predictions) | Return a batch of images, labels and predictions in the deepchecks format. |
423 | import os
import urllib.request
import zipfile
import albumentations as A
import numpy as np
import PIL.Image
import torch
import torchvision
from albumentations.pytorch import ToTensorV2
from torch import nn
from torch.utils.data import DataLoader
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = torchvision.models.resnet18(pretrained=True)
model.fc = nn.Linear(num_ftrs, 2)
model = model.to(device)
from deepchecks.vision.vision_data import BatchOutputFormat
from deepchecks.vision import VisionData
from deepchecks.vision.suites import train_test_validation
The provided code snippet includes necessary dependencies for implementing the `deepchecks_collate_fn` function. Write a Python function `def deepchecks_collate_fn(batch) -> BatchOutputFormat` to solve the following problem:
Return a batch of images, labels and predictions for a batch of data. The expected format is a dictionary with the following keys: 'images', 'labels' and 'predictions', each value is in the deepchecks format for the task. You can also use the BatchOutputFormat class to create the output.
Here is the function:
def deepchecks_collate_fn(batch) -> BatchOutputFormat:
"""Return a batch of images, labels and predictions for a batch of data. The expected format is a dictionary with
the following keys: 'images', 'labels' and 'predictions', each value is in the deepchecks format for the task.
You can also use the BatchOutputFormat class to create the output.
"""
# batch received as iterable of tuples of (image, label) and transformed to tuple of iterables of images and labels:
batch = tuple(zip(*batch))
# images:
inp = torch.stack(batch[0]).detach().numpy().transpose((0, 2, 3, 1))
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
inp = std * inp + mean
images = np.clip(inp, 0, 1) * 255
#labels:
labels = batch[1]
#predictions:
logits = model.to(device)(torch.stack(batch[0]).to(device))
predictions = nn.Softmax(dim=1)(logits)
return BatchOutputFormat(images=images, labels=labels, predictions=predictions) | Return a batch of images, labels and predictions for a batch of data. The expected format is a dictionary with the following keys: 'images', 'labels' and 'predictions', each value is in the deepchecks format for the task. You can also use the BatchOutputFormat class to create the output. |
424 | import typing as t
import numpy as np
from deepchecks.core.check_result import CheckResult
from deepchecks.core.checks import DatasetKind
from deepchecks.core.condition import ConditionCategory
from deepchecks.vision.base_checks import TrainTestCheck
from deepchecks.vision.context import Context
from deepchecks.vision.vision_data.batch_wrapper import BatchWrapper
from deepchecks.vision.datasets.detection.coco_torch import load_dataset
import pandas as pd
import plotly.express as px
from deepchecks.core import ConditionResult
class DatasetKind(enum.Enum):
"""Represents in single dataset checks, which dataset is currently worked on."""
TRAIN = 'Train'
TEST = 'Test'
The provided code snippet includes necessary dependencies for implementing the `init_color_averages_dict` function. Write a Python function `def init_color_averages_dict() -> t.Dict[str, np.array]` to solve the following problem:
Initialize the color averages dicts.
Here is the function:
def init_color_averages_dict() -> t.Dict[str, np.array]:
"""Initialize the color averages dicts."""
return {
DatasetKind.TRAIN.value: np.zeros((3,), dtype=np.float64),
DatasetKind.TEST.value: np.zeros((3,), dtype=np.float64),
} | Initialize the color averages dicts. |
425 | import typing as t
import numpy as np
from deepchecks.core.check_result import CheckResult
from deepchecks.core.checks import DatasetKind
from deepchecks.core.condition import ConditionCategory
from deepchecks.vision.base_checks import TrainTestCheck
from deepchecks.vision.context import Context
from deepchecks.vision.vision_data.batch_wrapper import BatchWrapper
from deepchecks.vision.datasets.detection.coco_torch import load_dataset
import pandas as pd
import plotly.express as px
from deepchecks.core import ConditionResult
class DatasetKind(enum.Enum):
"""Represents in single dataset checks, which dataset is currently worked on."""
TRAIN = 'Train'
TEST = 'Test'
The provided code snippet includes necessary dependencies for implementing the `init_pixel_counts_dict` function. Write a Python function `def init_pixel_counts_dict() -> t.Dict[str, int]` to solve the following problem:
Initialize the pixel counts dicts.
Here is the function:
def init_pixel_counts_dict() -> t.Dict[str, int]:
"""Initialize the pixel counts dicts."""
return {
DatasetKind.TRAIN.value: 0,
DatasetKind.TEST.value: 0,
} | Initialize the pixel counts dicts. |
426 | import typing as t
import numpy as np
from deepchecks.core.check_result import CheckResult
from deepchecks.core.checks import DatasetKind
from deepchecks.core.condition import ConditionCategory
from deepchecks.vision.base_checks import TrainTestCheck
from deepchecks.vision.context import Context
from deepchecks.vision.vision_data.batch_wrapper import BatchWrapper
from deepchecks.vision.datasets.detection.coco_torch import load_dataset
import pandas as pd
import plotly.express as px
from deepchecks.core import ConditionResult
class BatchWrapper:
"""Represents dataset batch returned by the dataloader during iteration."""
def __init__(self, batch: BatchOutputFormat, task_type: TaskType, images_seen_num: int):
self._task_type = task_type
self._batch = batch
self._labels, self._predictions, self._images = None, None, None
self._embeddings, self._additional_data, = None, None
self._image_identifiers = batch.get('image_identifiers')
# if there are no image identifiers, use the number of the image in loading process as identifier
if self._image_identifiers is None:
self._image_identifiers = np.asarray(range(images_seen_num, images_seen_num + len(self)), dtype='str')
self._vision_properties_cache = dict.fromkeys(PropertiesInputType)
def _get_relevant_data_for_properties(self, input_type: PropertiesInputType):
result = []
if input_type == PropertiesInputType.PARTIAL_IMAGES:
for img, bboxes_in_img in zip(self.numpy_images, self.numpy_labels):
if bboxes_in_img is None:
continue
result = result + [crop_image(img, *bbox[1:]) for bbox in bboxes_in_img]
elif input_type == PropertiesInputType.IMAGES:
result = self.numpy_images
elif input_type == PropertiesInputType.LABELS:
result = self.numpy_labels
elif input_type == PropertiesInputType.PREDICTIONS:
result = self.numpy_predictions
return result
def vision_properties(self, properties_list: Optional[List[Dict]], input_type: PropertiesInputType):
"""Calculate and cache the properties for the batch according to the property input type.
Parameters
----------
properties_list: Optional[List[Dict]]
List of properties to calculate. If None, default properties will be calculated.
input_type: PropertiesInputType
The input type of the properties.
Returns
-------
Dict[str, Any]
Dictionary of the properties name to list of property values per data element.
"""
if self._vision_properties_cache[input_type] is None:
self._vision_properties_cache[input_type] = {}
keys_in_cache = self._vision_properties_cache[input_type].keys()
if properties_list is not None:
properties_list = validate_properties(properties_list)
requested_properties_names = [prop['name'] for prop in properties_list]
properties_to_calc = [p for p in properties_list if p['name'] not in keys_in_cache]
if len(properties_to_calc) > 0:
data = self._get_relevant_data_for_properties(input_type)
self._vision_properties_cache[input_type].update(calc_vision_properties(data, properties_to_calc))
else:
if input_type not in [PropertiesInputType.PARTIAL_IMAGES, PropertiesInputType.IMAGES]:
# TODO: add support for quick default properties calculation for other input types
raise DeepchecksProcessError(f'None was passed to properties calculation for input type {input_type}.')
requested_properties_names = [prop['name'] for prop in default_image_properties]
if any(x not in keys_in_cache for x in requested_properties_names):
data = self._get_relevant_data_for_properties(input_type)
self._vision_properties_cache[input_type].update(calc_default_image_properties(data))
return {key: value for key, value in self._vision_properties_cache[input_type].items() if
key in requested_properties_names}
def original_labels(self):
"""Return labels for the batch, formatted in deepchecks format."""
if self._labels is None:
self._labels = self._batch.get('labels')
return self._labels
def numpy_labels(self) -> List[Union[np.ndarray, int]]:
"""Return labels for the batch in numpy format."""
required_dim = 0 if self._task_type == TaskType.CLASSIFICATION else 2
return sequence_to_numpy(self.original_labels, expected_ndim_per_object=required_dim)
def original_predictions(self):
"""Return predictions for the batch, formatted in deepchecks format."""
if self._predictions is None:
self._predictions = self._batch.get('predictions')
return self._predictions
def numpy_predictions(self) -> List[np.ndarray]:
"""Return predictions for the batch in numpy format."""
if self._task_type == TaskType.CLASSIFICATION:
required_dim = 1
elif self._task_type == TaskType.OBJECT_DETECTION:
required_dim = 2
elif self._task_type == TaskType.SEMANTIC_SEGMENTATION:
required_dim = 3
else:
required_dim = None
return sequence_to_numpy(self.original_predictions, expected_ndim_per_object=required_dim)
def original_images(self):
"""Return images for the batch, formatted in deepchecks format."""
if self._images is None:
self._images = self._batch.get('images')
return self._images
def numpy_images(self) -> List[Union[np.ndarray]]:
"""Return images for the batch in numpy format."""
return sequence_to_numpy(self.original_images, 'uint8', 3)
def original_embeddings(self):
"""Return embedding for the batch, formatted in deepchecks format."""
if self._embeddings is None:
self._embeddings = self._batch.get('embeddings')
return self._embeddings
def numpy_embeddings(self) -> List[Union[np.ndarray]]:
"""Return embedding for the batch in numpy format."""
return sequence_to_numpy(self.original_embeddings, 'float32')
def original_additional_data(self):
"""Return additional data for the batch, formatted in deepchecks format."""
if self._additional_data is None:
self._additional_data = self._batch.get('additional_data')
return self._additional_data
def numpy_additional_data(self):
"""Return additional data for the batch in numpy format."""
return sequence_to_numpy(self.original_additional_data)
def original_image_identifiers(self):
"""Return image identifiers for the batch, formatted in deepchecks format."""
return self._image_identifiers
def numpy_image_identifiers(self) -> List[Union[str, int]]:
"""Return image identifiers for the batch in numpy format."""
return sequence_to_numpy(self.original_image_identifiers, 'str', 0)
def __len__(self):
"""Return length of batch."""
data = self.numpy_images if self.numpy_images is not None else self.numpy_predictions if \
self.numpy_predictions is not None else self.numpy_labels if self.numpy_labels is not None else \
self.numpy_embeddings if self.numpy_embeddings is not None else self.numpy_additional_data
return len(data)
The provided code snippet includes necessary dependencies for implementing the `sum_pixel_values` function. Write a Python function `def sum_pixel_values(batch: BatchWrapper) -> np.array` to solve the following problem:
Sum the values of all the pixels in the batch, returning a numpy array with an entry per channel.
Here is the function:
def sum_pixel_values(batch: BatchWrapper) -> np.array:
"""Sum the values of all the pixels in the batch, returning a numpy array with an entry per channel."""
images = batch.original_images
return sum(image.sum(axis=(0, 1)) for image in images) # sum over the batch and pixels | Sum the values of all the pixels in the batch, returning a numpy array with an entry per channel. |
427 | import typing as t
import numpy as np
from deepchecks.core.check_result import CheckResult
from deepchecks.core.checks import DatasetKind
from deepchecks.core.condition import ConditionCategory
from deepchecks.vision.base_checks import TrainTestCheck
from deepchecks.vision.context import Context
from deepchecks.vision.vision_data.batch_wrapper import BatchWrapper
from deepchecks.vision.datasets.detection.coco_torch import load_dataset
import pandas as pd
import plotly.express as px
from deepchecks.core import ConditionResult
class BatchWrapper:
"""Represents dataset batch returned by the dataloader during iteration."""
def __init__(self, batch: BatchOutputFormat, task_type: TaskType, images_seen_num: int):
self._task_type = task_type
self._batch = batch
self._labels, self._predictions, self._images = None, None, None
self._embeddings, self._additional_data, = None, None
self._image_identifiers = batch.get('image_identifiers')
# if there are no image identifiers, use the number of the image in loading process as identifier
if self._image_identifiers is None:
self._image_identifiers = np.asarray(range(images_seen_num, images_seen_num + len(self)), dtype='str')
self._vision_properties_cache = dict.fromkeys(PropertiesInputType)
def _get_relevant_data_for_properties(self, input_type: PropertiesInputType):
result = []
if input_type == PropertiesInputType.PARTIAL_IMAGES:
for img, bboxes_in_img in zip(self.numpy_images, self.numpy_labels):
if bboxes_in_img is None:
continue
result = result + [crop_image(img, *bbox[1:]) for bbox in bboxes_in_img]
elif input_type == PropertiesInputType.IMAGES:
result = self.numpy_images
elif input_type == PropertiesInputType.LABELS:
result = self.numpy_labels
elif input_type == PropertiesInputType.PREDICTIONS:
result = self.numpy_predictions
return result
def vision_properties(self, properties_list: Optional[List[Dict]], input_type: PropertiesInputType):
"""Calculate and cache the properties for the batch according to the property input type.
Parameters
----------
properties_list: Optional[List[Dict]]
List of properties to calculate. If None, default properties will be calculated.
input_type: PropertiesInputType
The input type of the properties.
Returns
-------
Dict[str, Any]
Dictionary of the properties name to list of property values per data element.
"""
if self._vision_properties_cache[input_type] is None:
self._vision_properties_cache[input_type] = {}
keys_in_cache = self._vision_properties_cache[input_type].keys()
if properties_list is not None:
properties_list = validate_properties(properties_list)
requested_properties_names = [prop['name'] for prop in properties_list]
properties_to_calc = [p for p in properties_list if p['name'] not in keys_in_cache]
if len(properties_to_calc) > 0:
data = self._get_relevant_data_for_properties(input_type)
self._vision_properties_cache[input_type].update(calc_vision_properties(data, properties_to_calc))
else:
if input_type not in [PropertiesInputType.PARTIAL_IMAGES, PropertiesInputType.IMAGES]:
# TODO: add support for quick default properties calculation for other input types
raise DeepchecksProcessError(f'None was passed to properties calculation for input type {input_type}.')
requested_properties_names = [prop['name'] for prop in default_image_properties]
if any(x not in keys_in_cache for x in requested_properties_names):
data = self._get_relevant_data_for_properties(input_type)
self._vision_properties_cache[input_type].update(calc_default_image_properties(data))
return {key: value for key, value in self._vision_properties_cache[input_type].items() if
key in requested_properties_names}
def original_labels(self):
"""Return labels for the batch, formatted in deepchecks format."""
if self._labels is None:
self._labels = self._batch.get('labels')
return self._labels
def numpy_labels(self) -> List[Union[np.ndarray, int]]:
"""Return labels for the batch in numpy format."""
required_dim = 0 if self._task_type == TaskType.CLASSIFICATION else 2
return sequence_to_numpy(self.original_labels, expected_ndim_per_object=required_dim)
def original_predictions(self):
"""Return predictions for the batch, formatted in deepchecks format."""
if self._predictions is None:
self._predictions = self._batch.get('predictions')
return self._predictions
def numpy_predictions(self) -> List[np.ndarray]:
"""Return predictions for the batch in numpy format."""
if self._task_type == TaskType.CLASSIFICATION:
required_dim = 1
elif self._task_type == TaskType.OBJECT_DETECTION:
required_dim = 2
elif self._task_type == TaskType.SEMANTIC_SEGMENTATION:
required_dim = 3
else:
required_dim = None
return sequence_to_numpy(self.original_predictions, expected_ndim_per_object=required_dim)
def original_images(self):
"""Return images for the batch, formatted in deepchecks format."""
if self._images is None:
self._images = self._batch.get('images')
return self._images
def numpy_images(self) -> List[Union[np.ndarray]]:
"""Return images for the batch in numpy format."""
return sequence_to_numpy(self.original_images, 'uint8', 3)
def original_embeddings(self):
"""Return embedding for the batch, formatted in deepchecks format."""
if self._embeddings is None:
self._embeddings = self._batch.get('embeddings')
return self._embeddings
def numpy_embeddings(self) -> List[Union[np.ndarray]]:
"""Return embedding for the batch in numpy format."""
return sequence_to_numpy(self.original_embeddings, 'float32')
def original_additional_data(self):
"""Return additional data for the batch, formatted in deepchecks format."""
if self._additional_data is None:
self._additional_data = self._batch.get('additional_data')
return self._additional_data
def numpy_additional_data(self):
"""Return additional data for the batch in numpy format."""
return sequence_to_numpy(self.original_additional_data)
def original_image_identifiers(self):
"""Return image identifiers for the batch, formatted in deepchecks format."""
return self._image_identifiers
def numpy_image_identifiers(self) -> List[Union[str, int]]:
"""Return image identifiers for the batch in numpy format."""
return sequence_to_numpy(self.original_image_identifiers, 'str', 0)
def __len__(self):
"""Return length of batch."""
data = self.numpy_images if self.numpy_images is not None else self.numpy_predictions if \
self.numpy_predictions is not None else self.numpy_labels if self.numpy_labels is not None else \
self.numpy_embeddings if self.numpy_embeddings is not None else self.numpy_additional_data
return len(data)
The provided code snippet includes necessary dependencies for implementing the `count_pixels_in_batch` function. Write a Python function `def count_pixels_in_batch(batch: BatchWrapper) -> int` to solve the following problem:
Count the pixels in the batch.
Here is the function:
def count_pixels_in_batch(batch: BatchWrapper) -> int:
"""Count the pixels in the batch."""
return sum((image.shape[0] * image.shape[1] for image in batch.original_images)) | Count the pixels in the batch. |
428 | import contextlib
import os
import typing as t
from pathlib import Path
import albumentations as A
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms.functional as F
from albumentations.pytorch.transforms import ToTensorV2
from PIL import Image, ImageDraw
from torch.utils.data import DataLoader
from torchvision.datasets import VisionDataset
from torchvision.datasets.utils import download_and_extract_archive
from torchvision.utils import draw_segmentation_masks
from deepchecks.vision import VisionData, BatchOutputFormat
from deepchecks.vision.checks import ImagePropertyDrift
from deepchecks.vision.checks import LabelDrift
The provided code snippet includes necessary dependencies for implementing the `deepchecks_collate_fn` function. Write a Python function `def deepchecks_collate_fn(batch) -> BatchOutputFormat` to solve the following problem:
Return a batch of images, labels and predictions for a batch of data. The expected format is a dictionary with the following keys: 'images', 'labels' and 'predictions', each value is in the deepchecks format for the task. You can also use the BatchOutputFormat class to create the output.
Here is the function:
def deepchecks_collate_fn(batch) -> BatchOutputFormat:
"""Return a batch of images, labels and predictions for a batch of data. The expected format is a dictionary with
the following keys: 'images', 'labels' and 'predictions', each value is in the deepchecks format for the task.
You can also use the BatchOutputFormat class to create the output.
"""
# batch received as iterable of tuples of (image, label) and transformed to tuple of iterables of images and labels:
batch = tuple(zip(*batch))
images = [tensor.numpy().transpose((1, 2, 0)) for tensor in batch[0]]
labels = batch[1]
return BatchOutputFormat(images=images, labels=labels) | Return a batch of images, labels and predictions for a batch of data. The expected format is a dictionary with the following keys: 'images', 'labels' and 'predictions', each value is in the deepchecks format for the task. You can also use the BatchOutputFormat class to create the output. |
429 | import contextlib
import os
import typing as t
from pathlib import Path
import albumentations as A
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms.functional as F
from albumentations.pytorch.transforms import ToTensorV2
from PIL import Image, ImageDraw
from torch.utils.data import DataLoader
from torchvision.datasets import VisionDataset
from torchvision.datasets.utils import download_and_extract_archive
from torchvision.utils import draw_segmentation_masks
from deepchecks.vision import VisionData, BatchOutputFormat
from deepchecks.vision.checks import ImagePropertyDrift
from deepchecks.vision.checks import LabelDrift
The provided code snippet includes necessary dependencies for implementing the `number_of_detections` function. Write a Python function `def number_of_detections(labels) -> t.List[int]` to solve the following problem:
Return a list containing the number of detections per sample in batch.
Here is the function:
def number_of_detections(labels) -> t.List[int]:
"""Return a list containing the number of detections per sample in batch."""
return [masks_per_image.shape[0] for masks_per_image in labels] | Return a list containing the number of detections per sample in batch. |
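A hedged usage sketch: the function above can serve as a custom label property for the LabelDrift check; the property-dict keys follow the deepchecks convention and the display name is illustrative.
label_properties = [{'name': 'Number of Detections Per Image',
                     'method': number_of_detections,
                     'output_type': 'numerical'}]
check = LabelDrift(label_properties=label_properties)
# result = check.run(train_data, test_data)  # VisionData objects built elsewhere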
430 | from datetime import datetime
import joblib
import pandas as pd
from airflow.decorators import dag, task, short_circuit_task
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
def model_training_dag():
@short_circuit_task
def validate_data(**context):
from deepchecks.tabular.suites import data_integrity
from deepchecks.tabular import Dataset
hook = S3Hook('aws_connection')
file_name = hook.download_file(key=context['params']['data_key'], bucket_name=context['params']['bucket'],
local_path='.')
data_df = pd.read_csv(file_name)
dataset = Dataset(data_df, label='label', cat_features=[])
suite_result = data_integrity().run(dataset)
suite_result.save_as_html('data_validation.html')
hook.load_file(
filename='data_validation.html',
key='results/data_validation.html',
bucket_name=context['params']['bucket'],
replace=True
)
context['ti'].xcom_push(key='data', value=file_name)
return suite_result.passed()
@short_circuit_task
def validate_train_test_split(**context):
from deepchecks.tabular.suites import train_test_validation
from deepchecks.tabular import Dataset
data = pd.read_csv(context['ti'].xcom_pull(key='data'))
train_df, test_df = data.iloc[:len(data) // 2], data.iloc[len(data) // 2:]
train_df.to_csv(context['params']['train_path'])
test_df.to_csv(context['params']['test_path'])
train = Dataset(train_df, label='label', cat_features=[])
test = Dataset(test_df, label='label', cat_features=[])
suite_result = train_test_validation().run(train_dataset=train, test_dataset=test)
suite_result.save_as_html('split_validation.html')
hook = S3Hook('aws_connection')
hook.load_file(
filename='split_validation.html',
key='results/split_validation.html',
bucket_name=context['params']['bucket'],
replace=True
)
return suite_result.passed()
@task
def train_model(**context):
train_df = pd.read_csv(context['params']['train_path'])
# Train model and upload to s3
model = ...
joblib.dump(model, context['params']['model_path'])
hook = S3Hook('aws_connection')
hook.load_file(
filename=context['params']['model_path'],
key='results/model.joblib',
bucket_name=context['params']['bucket'],
replace=True
)
@task
def validate_model_performance(**context):
from deepchecks.tabular.suites import model_evaluation
from deepchecks.tabular import Dataset
train_df = pd.read_csv(context['params']['train_path'])
test_df = pd.read_csv(context['params']['test_path'])
model = joblib.load(context['params']['model_path'])
train = Dataset(train_df, label='label', cat_features=[])
test = Dataset(test_df, label='label', cat_features=[])
suite_result = model_evaluation().run(train_dataset=train, test_dataset=test, model=model)
suite_result.save_as_html('model_validation.html')
hook = S3Hook('aws_connection')
hook.load_file(
filename='model_validation.html',
key='results/model_validation.html',
bucket_name=context['params']['bucket'],
replace=True
)
return suite_result.passed()
validate_data() >> validate_train_test_split() >> train_model() >> validate_model_performance() | null |
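A hedged sketch of how the function above is typically turned into a DAG with the TaskFlow API: the dag decorator (imported above) is applied and the result is called once at module level to register the DAG. Schedule, start date and param values are placeholders, and the schedule keyword name depends on the Airflow version.
model_training = dag(
    schedule=None,                        # placeholder; older Airflow uses schedule_interval
    start_date=datetime(2023, 1, 1),      # placeholder start date
    catchup=False,
    params={'bucket': 'my-bucket',        # placeholder values matching the keys used above
            'data_key': 'data/raw.csv',
            'train_path': 'train.csv',
            'test_path': 'test.csv',
            'model_path': 'model.joblib'},
)(model_training_dag)
model_training()   # calling the decorated function registers the DAG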
431 | from datetime import datetime, timedelta
import os
from airflow import DAG
from airflow.operators.python import PythonOperator
import joblib
import pandas as pd
from deepchecks.tabular.datasets.classification import adult
dir_path = "suite_results"
data_path = os.path.join(os.getcwd(), "data")
def load_adult_dataset(**context):
df_train, df_test = adult.load_data(data_format='Dataframe')
try:
os.mkdir(data_path)
except OSError:
print("Creation of the directory {} failed".format(dir_path))
with open(os.path.join(data_path, "adult_train.csv"), "w") as f:
df_train.to_csv(f, index=False)
context["ti"].xcom_push(key="train_path", value=os.path.join(data_path, "adult_train.csv"))
with open(os.path.join(data_path, "adult_test.csv"), "w") as f:
df_test.to_csv(f, index=False)
context["ti"].xcom_push(key="test_path", value=os.path.join(data_path, "adult_test.csv")) | null |
432 | from datetime import datetime, timedelta
import os
from airflow import DAG
from airflow.operators.python import PythonOperator
import joblib
import pandas as pd
from deepchecks.tabular.datasets.classification import adult
data_path = os.path.join(os.getcwd(), "data")
def load_fitted_model(pretrained=True):
"""Load and return a fitted classification model.
Returns
-------
model : Joblib
The model/pipeline that was trained on the adult dataset.
"""
if sklearn.__version__ == _MODEL_VERSION and pretrained:
with urlopen(_MODEL_URL) as f:
model = joblib.load(f)
else:
model = _build_model()
train, _ = load_data()
model.fit(train.data[train.features], train.data[train.label_name])
return model
def load_adult_model(**context):
from deepchecks.tabular.datasets.classification.adult import load_fitted_model
model = load_fitted_model()
with open(os.path.join(data_path, "adult_model.joblib"), "wb") as f:
joblib.dump(model, f)
context["ti"].xcom_push(key="adult_model", value=os.path.join(data_path, "adult_model.joblib")) | null |
433 | from datetime import datetime, timedelta
import os
from airflow import DAG
from airflow.operators.python import PythonOperator
import joblib
import pandas as pd
from deepchecks.tabular.datasets.classification import adult
dir_path = "suite_results"
_target = 'income'
_CAT_FEATURES = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex',
'native-country']
def dataset_integrity_step(**context):
from deepchecks.tabular.suites import data_integrity
from deepchecks.tabular.datasets.classification.adult import _CAT_FEATURES, _target
from deepchecks.tabular import Dataset
adult_train = pd.read_csv(context.get("ti").xcom_pull(key="train_path"))
adult_test = pd.read_csv(context.get("ti").xcom_pull(key="test_path"))
ds_train = Dataset(adult_train, label=_target, cat_features=_CAT_FEATURES)
ds_test = Dataset(adult_test, label=_target, cat_features=_CAT_FEATURES)
train_results = data_integrity().run(ds_train)
test_results = data_integrity().run(ds_test)
try:
os.mkdir('suite_results')
except OSError:
print("Creation of the directory {} failed".format(dir_path))
run_time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
train_results.save_as_html(os.path.join(dir_path, f'train_integrity_{run_time}.html'))
test_results.save_as_html(os.path.join(dir_path, f'test_integrity_{run_time}.html')) | null |
434 | from datetime import datetime, timedelta
import os
from airflow import DAG
from airflow.operators.python import PythonOperator
import joblib
import pandas as pd
from deepchecks.tabular.datasets.classification import adult
dir_path = "suite_results"
_target = 'income'
_CAT_FEATURES = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex',
'native-country']
def model_evaluation_step(**context):
from deepchecks.tabular.suites import model_evaluation
from deepchecks.tabular.datasets.classification.adult import _CAT_FEATURES, _target
from deepchecks.tabular import Dataset
adult_model = joblib.load(context.get("ti").xcom_pull(key="adult_model"))
adult_train = pd.read_csv(context.get("ti").xcom_pull(key="train_path"))
adult_test = pd.read_csv(context.get("ti").xcom_pull(key="test_path"))
ds_train = Dataset(adult_train, label=_target, cat_features=_CAT_FEATURES)
ds_test = Dataset(adult_test, label=_target, cat_features=_CAT_FEATURES)
evaluation_results = model_evaluation().run(ds_train, ds_test, adult_model)
run_time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
evaluation_results.save_as_html(os.path.join(dir_path, f'model_evaluation_{run_time}.html')) | null |
435 | import torch
from transformers import DetrForObjectDetection
from typing import Union, List, Iterable
import numpy as np
from deepchecks.vision import VisionData
import torchvision.transforms as T
class COCODETRData:
"""Class for loading the COCO dataset meant for the DETR ResNet50 model`.
Implement the necessary methods to load the images, labels and generate model predictions in a format comprehensible
by deepchecks.
"""
# This is the list of classes returned by the DETR model. Stored in order to convert to the same class order as the
# COCO dataset used by the YOLOv5s model.
DETR_CLASSES = [
'N/A', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack',
'umbrella', 'N/A', 'N/A', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'N/A', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table', 'N/A',
'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A',
'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
'toothbrush'
]
def __init__(self):
# Create a transform to pre-process the images into a format acceptable by the DETR model.
self.transforms = T.Compose([
T.Resize(800),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Build a dict translating the classes DETR was trained on to the classes YOLO was trained on.
# DETR classes, listed in DETR_CLASSES, include 'N/A' classes which didn't exist in the YOLO version of COCO
# data.
self.label_translation = {}
detr_shift = 0
for i in range(len(self.DETR_CLASSES)):
if self.DETR_CLASSES[i] == 'N/A':
detr_shift += 1
self.label_translation[i] = i - detr_shift
    @staticmethod
    def batch_to_labels(batch) -> Union[List[torch.Tensor], torch.Tensor]:
"""Convert the batch to a list of labels. Copied from deepchecks.vision.datasets.detection.coco"""
def move_class(tensor):
return torch.index_select(tensor, 1, torch.LongTensor([4, 0, 1, 2, 3]).to(tensor.device)) \
if len(tensor) > 0 else tensor
return [move_class(tensor) for tensor in batch[1]]
    @staticmethod
    def batch_to_images(batch) -> Iterable[np.ndarray]:
"""Convert the batch to a list of images. Copied from deepchecks.vision.datasets.detection.coco"""
return [np.array(x) for x in batch[0]]
def _detect(self, im, model, device):
"""A helper function. Applies DETR detection to a single PIL image."""
def box_cxcywh_to_xyxy(x):
"""Convert bounding box format from [cx, cy, w, h] to [xmin, ymin, xmax, ymax], when c is "center"."""
x_c, y_c, w, h = x.unbind(1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=1).clip(0, 1)
def rescale_bboxes(out_bbox, size):
"""Rescale bounding boxes from the DETR model's normalized output to the original image size."""
img_w, img_h = size
b = box_cxcywh_to_xyxy(out_bbox)
b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
return b
# Apply the transform to the image.
img = self.transforms(im).unsqueeze(0)
# propagate through the model
with torch.no_grad():
outputs = model(img.to(device))
# keep only predictions with 0.7+ confidence
probas = outputs['logits'].softmax(-1)[0, :, :-1].cpu()
keep = probas.max(-1).values > 0.7
# convert boxes from [0; 1] normalized units to image scales.
bboxes_scaled = rescale_bboxes(outputs['pred_boxes'][0, keep].cpu(), im.size)
return probas[keep], bboxes_scaled
def _convert_to_80_labels(self, labels):
"""Use the pre-built self.label_translation to translate the DETR predictions to YOLO COCO classes."""
return torch.Tensor([self.label_translation[label] for label in labels]).reshape((-1, 1))
def infer_on_batch(self, batch, model, device) -> Union[List[torch.Tensor], torch.Tensor]:
"""Infer on a batch of images and return it in deepchecks format.
Return a list of prediction tensors (one for each image) containing in each row:
[x_min, y_min, width, height, confidence, class_id]
"""
processed_preds = []
# Iterate over images in the batch
for batch_idx in range(len(batch[0])):
probas, bboxes_scaled = self._detect(batch[0][batch_idx], model, device)
bboxes_scaled[:, 2:] = bboxes_scaled[:, 2:] - bboxes_scaled[:, :2] # xyxy to xywh
if len(probas) > 0:
processed_pred = torch.cat([bboxes_scaled, # xywh bbox coordinates
probas.max(dim=1)[0].reshape((-1, 1)), # confidence
self._convert_to_80_labels(probas.argmax(dim=1).tolist())],
# translated class id
dim=1)
processed_preds.append(processed_pred)
return processed_preds
from deepchecks.vision.datasets.detection import coco_torch as coco
from deepchecks.vision.datasets.detection import coco_utils
from deepchecks.vision.vision_data import BatchOutputFormat
from deepchecks.vision.checks import MeanAveragePrecisionReport
The provided code snippet includes necessary dependencies for implementing the `deepchecks_collate_fn_generator` function. Write a Python function `def deepchecks_collate_fn_generator(model, device)` to solve the following problem:
Generates a collate function that converts the batch to the deepchecks format, using the given model.
Here is the function:
def deepchecks_collate_fn_generator(model, device):
"""Generates a collate function that converts the batch to the deepchecks format, using the given model."""
detr_formatter = COCODETRData()
def deepchecks_collate_fn(batch):
"""A collate function that converts the batch to the format expected by deepchecks."""
# Reproduce the steps of the default collate function
batch = list(zip(*batch))
images = detr_formatter.batch_to_images(batch)
labels = detr_formatter.batch_to_labels(batch)
predictions = detr_formatter.infer_on_batch(batch, model, device)
return BatchOutputFormat(images=images, labels=labels, predictions=predictions)
return deepchecks_collate_fn | Generates a collate function that converts the batch to the deepchecks format, using the given model. |
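A hedged usage sketch (assumes a COCO-style torch dataset named `coco_dataset` that yields (PIL image, label-tensor) pairs, plus local access to the facebook/detr-resnet-50 checkpoint; neither is defined above):
from torch.utils.data import DataLoader

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
detr_model = DetrForObjectDetection.from_pretrained('facebook/detr-resnet-50').to(device).eval()

collate_fn = deepchecks_collate_fn_generator(detr_model, device)
loader = DataLoader(coco_dataset, batch_size=8, collate_fn=collate_fn)  # coco_dataset is hypothetical
batch = next(iter(loader))  # BatchOutputFormat with 'images', 'labels' and 'predictions'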
436 | import inspect
from typing import Callable
from deepchecks.core import DatasetKind
from deepchecks.core.errors import DeepchecksBaseError
from deepchecks.tabular import Context, SingleDatasetCheck, checks
from deepchecks.tabular.datasets.classification import lending_club
from deepchecks.tabular.datasets.regression import avocado
class DeepchecksBaseError(Exception):
"""Base exception class for all 'Deepchecks' error types."""
def __init__(self, message: str, html: str = None):
super().__init__(message)
self.message = message
self.html = html or message
def run_check_fn(check_class) -> Callable:
def run(self, cache, dataset_name):
context = cache[dataset_name]
check = check_class()
try:
if isinstance(check, SingleDatasetCheck):
check.run_logic(context, DatasetKind.TRAIN)
else:
check.run_logic(context)
except DeepchecksBaseError:
pass
return run | null |
437 | import inspect
from typing import Callable
from deepchecks.core import DatasetKind
from deepchecks.core.errors import DeepchecksBaseError
from deepchecks.tabular import Context, SingleDatasetCheck, checks
from deepchecks.tabular.datasets.classification import lending_club
from deepchecks.tabular.datasets.regression import avocado
def setup_lending_club() -> Context:
train, test = lending_club.load_data()
model = lending_club.load_fitted_model()
context = Context(train, test, model)
context.feature_importance # calculating here to avoid first check being slower
return context | null |
438 | import inspect
from typing import Callable
from deepchecks.core import DatasetKind
from deepchecks.core.errors import DeepchecksBaseError
from deepchecks.tabular import Context, SingleDatasetCheck, checks
from deepchecks.tabular.datasets.classification import lending_club
from deepchecks.tabular.datasets.regression import avocado
def setup_avocado() -> Context:
train, test = avocado.load_data()
model = avocado.load_fitted_model()
context = Context(train, test, model)
context.feature_importance # calculating here to avoid first check being slower
return context | null |
439 | from typing import Callable
import torch
from deepchecks.core.errors import DeepchecksBaseError
from deepchecks.vision import SingleDatasetCheck, TrainTestCheck
from deepchecks.vision.datasets.classification import mnist_torch as mnist
from deepchecks.vision.datasets.detection import coco_torch as coco
from deepchecks.vision.vision_data import VisionData
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
class DeepchecksBaseError(Exception):
"""Base exception class for all 'Deepchecks' error types."""
def __init__(self, message: str, html: str = None):
super().__init__(message)
self.message = message
self.html = html or message
def run_check_fn(check_class) -> Callable:
def run(self, cache, dataset_name):
train_ds, test_ds, train_pred, test_pred = cache[dataset_name]
check = check_class()
try:
if isinstance(check, SingleDatasetCheck):
check.run(train_ds, train_predictions=train_pred, device=device)
elif isinstance(check, TrainTestCheck):
check.run(train_ds, test_ds, train_predictions=train_pred,
test_predictions=test_pred, device=device)
except DeepchecksBaseError:
pass
return run | null |
440 | from typing import Callable
import torch
from deepchecks.core.errors import DeepchecksBaseError
from deepchecks.vision import SingleDatasetCheck, TrainTestCheck
from deepchecks.vision.datasets.classification import mnist_torch as mnist
from deepchecks.vision.datasets.detection import coco_torch as coco
from deepchecks.vision.vision_data import VisionData
def create_static_predictions(train: VisionData, test: VisionData, model):
static_preds = []
for vision_data in [train, test]:
if vision_data is not None:
static_pred = {}
for i, batch in enumerate(vision_data):
predictions = vision_data.infer_on_batch(batch, model, device)
indexes = list(vision_data.data_loader.batch_sampler)[i]
static_pred.update(dict(zip(indexes, predictions)))
else:
static_pred = None
static_preds.append(static_pred)
train_preds, tests_preds = static_preds
return train_preds, tests_preds
def setup_mnist():
mnist_model = mnist.load_model()
train_ds = mnist.load_dataset(train=True, object_type='VisionData')
test_ds = mnist.load_dataset(train=False, object_type='VisionData')
train_preds, tests_preds = create_static_predictions(train_ds, test_ds, mnist_model)
return train_ds, test_ds, train_preds, tests_preds | null |
441 | from typing import Callable
import torch
from deepchecks.core.errors import DeepchecksBaseError
from deepchecks.vision import SingleDatasetCheck, TrainTestCheck
from deepchecks.vision.datasets.classification import mnist_torch as mnist
from deepchecks.vision.datasets.detection import coco_torch as coco
from deepchecks.vision.vision_data import VisionData
def create_static_predictions(train: VisionData, test: VisionData, model):
def setup_coco():
coco_model = coco.load_model()
train_ds = coco.load_dataset(train=True, object_type='VisionData')
test_ds = coco.load_dataset(train=False, object_type='VisionData')
train_preds, tests_preds = create_static_predictions(train_ds, test_ds, coco_model)
return train_ds, test_ds, train_preds, tests_preds | null |
442 | import warnings
import numpy as np
import pandas as pd
from pandas.api.types import (is_bool_dtype, is_categorical_dtype, is_datetime64_any_dtype, is_numeric_dtype,
is_object_dtype, is_string_dtype, is_timedelta64_dtype)
from sklearn import preprocessing, tree
from sklearn.metrics import f1_score, mean_absolute_error
from sklearn.model_selection import cross_val_score
from deepchecks.utils.typing import Hashable
def _normalized_mae_score(model_mae, naive_mae):
"""Normalize the model MAE score, given the baseline score."""
# # Value range of MAE is [0, infinity), 0 is best
# 10, 5 ==> 0 because worse than naive
# 10, 20 ==> 0.5
# 5, 20 ==> 0.75 = 1 - (mae/base_mae)
if model_mae > naive_mae:
return 0
else:
return 1 - (model_mae / naive_mae)
The provided code snippet includes necessary dependencies for implementing the `_mae_normalizer` function. Write a Python function `def _mae_normalizer(df, y, model_score, **kwargs)` to solve the following problem:
In case of MAE, calculates the baseline score for y and derives the PPS.
Here is the function:
def _mae_normalizer(df, y, model_score, **kwargs):
"""In case of MAE, calculates the baseline score for y and derives the PPS."""
df["naive"] = df[y].median()
baseline_score = mean_absolute_error(df[y], df["naive"]) # true, pred
ppscore = _normalized_mae_score(abs(model_score), baseline_score)
return ppscore, baseline_score | In case of MAE, calculates the baseline score for y and derives the PPS. |
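An illustrative arithmetic check for the helper above (values are made up): a model MAE of 5 against a naive baseline MAE of 20 normalizes to 1 - 5/20 = 0.75, while a model worse than the baseline normalizes to 0.
assert _normalized_mae_score(5, 20) == 0.75   # better than the naive baseline
assert _normalized_mae_score(10, 5) == 0      # worse than the naive baseline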
443 | import warnings
import numpy as np
import pandas as pd
from pandas.api.types import (is_bool_dtype, is_categorical_dtype, is_datetime64_any_dtype, is_numeric_dtype,
is_object_dtype, is_string_dtype, is_timedelta64_dtype)
from sklearn import preprocessing, tree
from sklearn.metrics import f1_score, mean_absolute_error
from sklearn.model_selection import cross_val_score
from deepchecks.utils.typing import Hashable
def _normalized_f1_score(model_f1, baseline_f1):
"""Normalize the model F1 score, given the baseline score."""
# # F1 ranges from 0 to 1
# # 1 is best
# 0.5, 0.7 ==> 0 because model is worse than naive baseline
# 0.75, 0.5 ==> 0.5
#
if model_f1 < baseline_f1:
return 0
else:
scale_range = 1.0 - baseline_f1 # eg 0.3
f1_diff = model_f1 - baseline_f1 # eg 0.1
return f1_diff / scale_range # 0.1/0.3 = 0.33
The provided code snippet includes necessary dependencies for implementing the `_f1_normalizer` function. Write a Python function `def _f1_normalizer(df, y, model_score, random_seed)` to solve the following problem:
In case of F1, calculates the baseline score for y and derives the PPS.
Here is the function:
def _f1_normalizer(df, y, model_score, random_seed):
"""In case of F1, calculates the baseline score for y and derives the PPS."""
label_encoder = preprocessing.LabelEncoder()
df["truth"] = label_encoder.fit_transform(df[y])
df["most_common_value"] = df["truth"].value_counts().index[0]
random = df["truth"].sample(frac=1, random_state=random_seed)
baseline_score = max(
f1_score(df["truth"], df["most_common_value"], average="weighted"),
f1_score(df["truth"], random, average="weighted"),
)
ppscore = _normalized_f1_score(model_score, baseline_score)
return ppscore, baseline_score | In case of F1, calculates the baseline score for y and derives the PPS. |
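An illustrative arithmetic check for the normalization helper (values are made up): with a baseline F1 of 0.5, a model F1 of 0.75 covers half of the remaining range, so the normalized score is (0.75 - 0.5) / (1.0 - 0.5) = 0.5.
assert _normalized_f1_score(0.75, 0.5) == 0.5  # halfway between baseline and perfect
assert _normalized_f1_score(0.5, 0.7) == 0     # below the baseline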
444 | import warnings
import numpy as np
import pandas as pd
from pandas.api.types import (is_bool_dtype, is_categorical_dtype, is_datetime64_any_dtype, is_numeric_dtype,
is_object_dtype, is_string_dtype, is_timedelta64_dtype)
from sklearn import preprocessing, tree
from sklearn.metrics import f1_score, mean_absolute_error
from sklearn.model_selection import cross_val_score
from deepchecks.utils.typing import Hashable
def score(
df,
x,
y,
task=NOT_SUPPORTED_ANYMORE,
sample=5_000,
cross_validation=4,
random_seed=123,
invalid_score=0,
catch_errors=True,
):
"""
Calculate the Predictive Power Score (PPS) for "x predicts y".
The score always ranges from 0 to 1 and is data-type agnostic.
A score of 0 means that the column x cannot predict the column y better than a naive baseline model.
A score of 1 means that the column x can perfectly predict the column y given the model.
A score between 0 and 1 states the ratio of how much potential predictive power the model achieved compared to the
baseline model.
Parameters
----------
df : pandas.DataFrame
Dataframe that contains the columns x and y
x : str
Name of the column x which acts as the feature
y : str
Name of the column y which acts as the target
sample : int or `None`
Number of rows for sampling. The sampling decreases the calculation time of the PPS.
If `None` there will be no sampling.
cross_validation : int
Number of iterations during cross-validation. This has the following implications:
For example, if the number is 4, then it is possible to detect patterns when there are at least 4 times the same
observation. If the limit is increased, the required minimum observations also increase. This is important,
because this is the limit when sklearn will throw an error and the PPS cannot be calculated
random_seed : int or `None`
Random seed for the parts of the calculation that require random numbers, e.g. shuffling or sampling.
If the value is set, the results will be reproducible. If the value is `None` a new random number is drawn at
the start of each calculation.
invalid_score : any
The score that is returned when a calculation is invalid, e.g. because the data type was not supported.
catch_errors : bool
If `True` all errors will be caught and reported as `unknown_error` which ensures convenience. If `False`
errors will be raised. This is helpful for inspecting and debugging errors.
Returns
-------
Dict
A dict that contains multiple fields about the resulting PPS.
The dict enables introspection into the calculations that have been performed under the hood
"""
if not isinstance(df, pd.DataFrame):
raise TypeError(
f"The 'df' argument should be a pandas.DataFrame but you passed a {type(df)}\nPlease convert your input to "
f"a pandas.DataFrame"
)
if not _is_column_in_df(x, df):
raise ValueError(
f"The 'x' argument should be the name of a dataframe column but the variable that you passed is not a "
f"column in the given dataframe.\nPlease review the column name or your dataframe"
)
if len(df[[x]].columns) >= 2:
raise AssertionError(
f"The dataframe has {len(df[[x]].columns)} columns with the same column name {x}\nPlease adjust the "
f"dataframe and make sure that only 1 column has the name {x}"
)
if not _is_column_in_df(y, df):
raise ValueError(
f"The 'y' argument should be the name of a dataframe column but the variable that you passed is not a "
f"column in the given dataframe.\nPlease review the column name or your dataframe"
)
if len(df[[y]].columns) >= 2:
raise AssertionError(
f"The dataframe has {len(df[[y]].columns)} columns with the same column name {y}\nPlease adjust the "
f"dataframe and make sure that only 1 column has the name {y}"
)
if task is not NOT_SUPPORTED_ANYMORE:
raise AttributeError(
"The attribute 'task' is no longer supported because it led to confusion and inconsistencies.\nThe task of the model is now determined based on the data types of the columns. If you want to change the task please adjust the data type of the column.\nFor more details, please refer to the README"
)
if random_seed is None:
from random import random
random_seed = int(random() * 1000)
try:
return _score(
df,
x,
y,
task,
sample,
cross_validation,
random_seed,
invalid_score,
catch_errors,
)
except Exception as exception:
if catch_errors:
case_type = "unknown_error"
task = _get_task(case_type, invalid_score)
return {
"x": x,
"y": y,
"ppscore": task["ppscore"],
"case": case_type,
"is_valid_score": task["is_valid_score"],
"metric": task["metric_name"],
"baseline_score": task["baseline_score"],
"model_score": task["model_score"], # sklearn returns negative mae
"model": task["model"],
}
else:
raise exception
def _format_list_of_dicts(scores, output, sorted):
"""
Format list of score dicts `scores`.
- maybe sort by ppscore
- maybe return pandas.Dataframe
- output can be one of ["df", "list"]
"""
if sorted:
scores.sort(key=lambda item: item["ppscore"], reverse=True)
if output == "df":
df_columns = [
"x",
"y",
"ppscore",
"case",
"is_valid_score",
"metric",
"baseline_score",
"model_score",
"model",
]
data = {column: [score[column] for score in scores] for column in df_columns}
scores = pd.DataFrame.from_dict(data)
return scores
The provided code snippet includes necessary dependencies for implementing the `matrix` function. Write a Python function `def matrix(df, output="df", sorted=False, **kwargs)` to solve the following problem:
Calculate the Predictive Power Score (PPS) matrix for all columns in the dataframe. Args: df : pandas.DataFrame The dataframe that contains the data output: str - potential values: "df", "list" Control the type of the output. Either return a pandas.DataFrame (df) or a list with the score dicts sorted: bool Whether or not to sort the output dataframe/list by the ppscore kwargs: Other key-word arguments that shall be forwarded to the pps.score method, e.g. `sample, `cross_validation, `random_seed, `invalid_score`, `catch_errors` Returns: pandas.DataFrame or list of Dict Either returns a tidy dataframe or a list of all the PPS dicts. This can be influenced by the output argument
Here is the function:
def matrix(df, output="df", sorted=False, **kwargs):
"""
Calculate the Predictive Power Score (PPS) matrix for all columns in the dataframe.
Args:
df : pandas.DataFrame
The dataframe that contains the data
output: str - potential values: "df", "list"
Control the type of the output. Either return a pandas.DataFrame (df) or a list with the score dicts
sorted: bool
Whether or not to sort the output dataframe/list by the ppscore
kwargs:
Other key-word arguments that shall be forwarded to the pps.score method,
e.g. `sample, `cross_validation, `random_seed, `invalid_score`, `catch_errors`
Returns:
pandas.DataFrame or list of Dict
Either returns a tidy dataframe or a list of all the PPS dicts. This can be influenced
by the output argument
"""
if not isinstance(df, pd.DataFrame):
raise TypeError(
f"The 'df' argument should be a pandas.DataFrame but you passed a {type(df)}\nPlease convert your input to a pandas.DataFrame"
)
if not output in ["df", "list"]:
raise ValueError(
f"""The 'output' argument should be one of ["df", "list"] but you passed: {output}\nPlease adjust your input to one of the valid values"""
)
if not sorted in [True, False]:
raise ValueError(
f"""The 'sorted' argument should be one of [True, False] but you passed: {sorted}\nPlease adjust your input to one of the valid values"""
)
scores = [score(df, x, y, **kwargs) for x in df for y in df]
return _format_list_of_dicts(scores=scores, output=output, sorted=sorted) | Calculate the Predictive Power Score (PPS) matrix for all columns in the dataframe. Args: df : pandas.DataFrame The dataframe that contains the data output: str - potential values: "df", "list" Control the type of the output. Either return a pandas.DataFrame (df) or a list with the score dicts sorted: bool Whether or not to sort the output dataframe/list by the ppscore kwargs: Other key-word arguments that shall be forwarded to the pps.score method, e.g. `sample, `cross_validation, `random_seed, `invalid_score`, `catch_errors` Returns: pandas.DataFrame or list of Dict Either returns a tidy dataframe or a list of all the PPS dicts. This can be influenced by the output argument |
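A minimal usage sketch (assumes the full ppscore-style module, including the private `_score` helper referenced above; the synthetic frame and column names are illustrative):
df = pd.DataFrame({'x': np.random.uniform(-2, 2, 1_000)})
df['y'] = df['x'] * df['x']                        # y is fully determined by x
pps_df = matrix(df, sample=None)                   # tidy dataframe with one row per (x, y) pair
pps_list = matrix(df, output='list', sorted=True)  # same scores as a list of dicts, highest PPS first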
445 | from deepchecks.nlp import Suite
from deepchecks.nlp.checks import (ConflictingLabels, FrequentSubstrings, LabelDrift, MetadataSegmentsPerformance,
PredictionDrift, PropertyDrift, PropertyLabelCorrelation,
PropertySegmentsPerformance, SpecialCharacters, TextDuplicates, TextEmbeddingsDrift,
TextPropertyOutliers, TrainTestPerformance, TrainTestSamplesMix,
UnderAnnotatedMetaDataSegments, UnderAnnotatedPropertySegments, UnknownTokens)
def data_integrity(n_samples: int = None,
random_state: int = 42,
**kwargs) -> Suite:
"""Suite for detecting integrity issues within a single dataset.
Parameters
----------
n_samples : int , default: None
number of samples to use for checks that sample data. If none, using the default n_samples per check.
random_state : int, default: 42
random seed for all checks.
**kwargs : dict
additional arguments to pass to the checks.
Returns
-------
Suite
A suite for detecting integrity issues within a single dataset.
Examples
--------
>>> from deepchecks.nlp.suites import data_integrity
>>> suite = data_integrity(n_samples=1_000_000)
>>> result = suite.run()
>>> result.show()
"""
args = locals()
args.pop('kwargs')
non_none_args = {k: v for k, v in args.items() if v is not None}
kwargs = {**non_none_args, **kwargs}
return Suite(
'Data Integrity Suite',
TextPropertyOutliers(**kwargs).add_condition_outlier_ratio_less_or_equal(),
UnknownTokens(**kwargs).add_condition_ratio_of_unknown_words_less_or_equal(),
UnderAnnotatedPropertySegments(**kwargs).add_condition_segments_relative_performance_greater_than(),
UnderAnnotatedMetaDataSegments(**kwargs).add_condition_segments_relative_performance_greater_than(),
PropertyLabelCorrelation(**kwargs).add_condition_property_pps_less_than(),
ConflictingLabels(**kwargs).add_condition_ratio_of_conflicting_labels_less_or_equal(),
TextDuplicates(**kwargs).add_condition_ratio_less_or_equal(),
SpecialCharacters(**kwargs).add_condition_samples_ratio_w_special_characters_less_or_equal(),
FrequentSubstrings(**kwargs).add_condition_zero_result(),
)
def train_test_validation(n_samples: int = None,
random_state: int = 42,
**kwargs) -> Suite:
"""Suite for validating correctness of train-test split, including distribution, \
leakage and integrity checks.
Parameters
----------
n_samples : int , default: None
number of samples to use for checks that sample data. If none, using the default n_samples per check.
random_state : int, default: 42
random seed for all checks.
**kwargs : dict
additional arguments to pass to the checks.
Returns
-------
Suite
A suite for validating correctness of train-test split, including distribution, \
leakage and integrity checks.
Examples
--------
>>> from deepchecks.nlp.suites import train_test_validation
>>> suite = train_test_validation(n_samples=1_000_000)
>>> result = suite.run()
>>> result.show()
"""
args = locals()
args.pop('kwargs')
non_none_args = {k: v for k, v in args.items() if v is not None}
kwargs = {**non_none_args, **kwargs}
return Suite(
'Train Test Validation Suite',
PropertyDrift(**kwargs).add_condition_drift_score_less_than(),
LabelDrift(**kwargs).add_condition_drift_score_less_than(),
TextEmbeddingsDrift(**kwargs).add_condition_overall_drift_value_less_than(),
TrainTestSamplesMix(**kwargs).add_condition_duplicates_ratio_less_or_equal(),
)
def model_evaluation(n_samples: int = None,
random_state: int = 42,
**kwargs) -> Suite:
"""Suite for evaluating the model's performance over different metrics, segments, error analysis, examining \
overfitting, comparing to baseline, and more.
Parameters
----------
n_samples : int , default: None
number of samples to use for checks that sample data. If none, use the default n_samples per check.
random_state : int, default: 42
random seed for all checks.
**kwargs : dict
additional arguments to pass to the checks.
Returns
-------
Suite
A suite for evaluating the model's performance.
Examples
--------
>>> from deepchecks.nlp.suites import model_evaluation
>>> suite = model_evaluation(n_samples=1_000_000)
>>> result = suite.run()
>>> result.show()
"""
args = locals()
args.pop('kwargs')
non_none_args = {k: v for k, v in args.items() if v is not None}
kwargs = {**non_none_args, **kwargs}
return Suite(
'Model Evaluation Suite',
PredictionDrift(**kwargs).add_condition_drift_score_less_than(),
TrainTestPerformance(**kwargs).add_condition_train_test_relative_degradation_less_than(),
PropertySegmentsPerformance(**kwargs).add_condition_segments_relative_performance_greater_than(),
MetadataSegmentsPerformance(**kwargs).add_condition_segments_relative_performance_greater_than(),
)
The provided code snippet includes necessary dependencies for implementing the `full_suite` function. Write a Python function `def full_suite(**kwargs) -> Suite` to solve the following problem:
Create a suite that includes many of the implemented checks, for a quick overview of your model and data.
Here is the function:
def full_suite(**kwargs) -> Suite:
"""Create a suite that includes many of the implemented checks, for a quick overview of your model and data."""
return Suite(
'Full Suite',
model_evaluation(**kwargs),
train_test_validation(**kwargs),
data_integrity(**kwargs),
) | Create a suite that includes many of the implemented checks, for a quick overview of your model and data. |
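A hedged usage sketch (assumes two pre-built TextData objects named `train_data` and `test_data`; model predictions can additionally be supplied to the run call, but that is omitted here):
suite = full_suite(n_samples=100_000)
result = suite.run(train_data, test_data)  # train_data / test_data are hypothetical TextData objects
result.show()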
446 | import collections
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Set, Tuple, Type, cast
import numpy as np
import pandas as pd
from deepchecks.core.errors import DeepchecksValueError, ValidationError
from deepchecks.nlp.task_type import TaskType, TTextLabel
from deepchecks.utils.logger import get_logger
from deepchecks.utils.metrics import is_label_none
from deepchecks.utils.type_inference import infer_categorical_features, infer_numerical_features
from deepchecks.utils.validation import is_sequence_not_str
class DeepchecksValueError(DeepchecksBaseError):
"""Exception class that represent a fault parameter was passed to Deepchecks."""
pass
def is_sequence_not_str(value) -> TypeGuard[t.Sequence[t.Any]]:
"""Check if value is a non str sequence."""
return (
not isinstance(value, (bytes, str, bytearray))
and isinstance(value, (t.Sequence, pd.Series, np.ndarray))
)
The provided code snippet includes necessary dependencies for implementing the `validate_tokenized_text` function. Write a Python function `def validate_tokenized_text(tokenized_text: Optional[Sequence[Sequence[str]]])` to solve the following problem:
Validate tokenized text format.
Here is the function:
def validate_tokenized_text(tokenized_text: Optional[Sequence[Sequence[str]]]):
"""Validate tokenized text format."""
error_string = 'tokenized_text must be a Sequence of Sequences of strings'
if not is_sequence_not_str(tokenized_text):
raise DeepchecksValueError(error_string)
if not all(is_sequence_not_str(x) for x in tokenized_text):
raise DeepchecksValueError(error_string)
if not all(isinstance(x, str) for tokens in tokenized_text for x in tokens):
raise DeepchecksValueError(error_string) | Validate tokenized text format. |
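A minimal sketch of the accepted input shape (samples below are illustrative):
validate_tokenized_text([['Hello', 'world'], ['Deepchecks', 'validates', 'NLP']])  # passes silently
validate_tokenized_text(['Hello world'])  # raises DeepchecksValueError: not a sequence of token sequences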
447 | import collections
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Set, Tuple, Type, cast
import numpy as np
import pandas as pd
from deepchecks.core.errors import DeepchecksValueError, ValidationError
from deepchecks.nlp.task_type import TaskType, TTextLabel
from deepchecks.utils.logger import get_logger
from deepchecks.utils.metrics import is_label_none
from deepchecks.utils.type_inference import infer_categorical_features, infer_numerical_features
from deepchecks.utils.validation import is_sequence_not_str
class DeepchecksValueError(DeepchecksBaseError):
"""Exception class that represent a fault parameter was passed to Deepchecks."""
pass
def is_sequence_not_str(value) -> TypeGuard[t.Sequence[t.Any]]:
"""Check if value is a non str sequence."""
return (
not isinstance(value, (bytes, str, bytearray))
and isinstance(value, (t.Sequence, pd.Series, np.ndarray))
)
The provided code snippet includes necessary dependencies for implementing the `validate_raw_text` function. Write a Python function `def validate_raw_text(raw_text: Optional[Sequence[str]])` to solve the following problem:
Validate text format.
Here is the function:
def validate_raw_text(raw_text: Optional[Sequence[str]]):
"""Validate text format."""
error_string = 'raw_text must be a Sequence of strings'
if not is_sequence_not_str(raw_text):
raise DeepchecksValueError(error_string)
if not all(isinstance(x, str) for x in raw_text):
raise DeepchecksValueError(error_string) | Validate text format. |
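A minimal sketch (samples are illustrative): raw_text must be a non-string sequence of strings.
validate_raw_text(['first sample text', 'second sample text'])  # passes silently
validate_raw_text('a single string')  # raises DeepchecksValueError: a bare string is not accepted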
448 | import collections
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Set, Tuple, Type, cast
import numpy as np
import pandas as pd
from deepchecks.core.errors import DeepchecksValueError, ValidationError
from deepchecks.nlp.task_type import TaskType, TTextLabel
from deepchecks.utils.logger import get_logger
from deepchecks.utils.metrics import is_label_none
from deepchecks.utils.type_inference import infer_categorical_features, infer_numerical_features
from deepchecks.utils.validation import is_sequence_not_str
def label_is_null(input_label):
"""Check if the label is null for different possible input types."""
if input_label is None:
return True
if is_sequence_not_str(input_label):
if len(input_label) == 0:
return True
if isinstance(input_label, pd.Series):
first_element = input_label.iloc[0]
else:
first_element = input_label[0]
if is_sequence_not_str(first_element):
return all(pd.isnull(x).all() for x in input_label)
else:
return all(pd.isnull(x) for x in input_label)
else:
return False
class DeepchecksValueError(DeepchecksBaseError):
"""Exception class that represent a fault parameter was passed to Deepchecks."""
pass
TTextLabel = t.Union[TClassLabel, TTokenLabel, TNoneLabel]
class TaskType(Enum):
"""Enum containing supported task types."""
TEXT_CLASSIFICATION = 'text_classification'
TOKEN_CLASSIFICATION = 'token_classification'
OTHER = 'other'
def is_label_none(label):
"""Check if label (single label of a sample) is None."""
result = pd.isnull(label)
if isinstance(result, bool):
return result
return any(result)
def is_sequence_not_str(value) -> TypeGuard[t.Sequence[t.Any]]:
"""Check if value is a non str sequence."""
return (
not isinstance(value, (bytes, str, bytearray))
and isinstance(value, (t.Sequence, pd.Series, np.ndarray))
)
The provided code snippet includes necessary dependencies for implementing the `validate_modify_label` function. Write a Python function `def validate_modify_label(labels: Optional[TTextLabel], task_type: TaskType, expected_size: int, tokenized_text: Optional[Sequence[Sequence[str]]]) -> Optional[TTextLabel]` to solve the following problem:
Validate and process label to accepted formats.
Here is the function:
def validate_modify_label(labels: Optional[TTextLabel], task_type: TaskType, expected_size: int,
tokenized_text: Optional[Sequence[Sequence[str]]]) -> Optional[TTextLabel]:
"""Validate and process label to accepted formats."""
if label_is_null(labels):
return None
if not is_sequence_not_str(labels):
raise DeepchecksValueError('label must be a Sequence')
if not len(labels) == expected_size:
raise DeepchecksValueError(f'Label length ({len(labels)}) does not match expected length ({expected_size})')
if task_type == TaskType.TEXT_CLASSIFICATION:
if all(is_sequence_not_str(x) or is_label_none(x) for x in labels): # Multilabel
multilabel_error = 'multilabel was identified. It must be a Sequence of Sequences of 0 or 1.'
if not all(all(y in (0, 1) for y in x) for x in labels if not is_label_none(x)):
raise DeepchecksValueError(multilabel_error)
if any(len(labels[0]) != len(labels[i]) for i in range(len(labels)) if not is_label_none(labels[i])):
raise DeepchecksValueError('All multilabel entries must be of the same length, which is the number'
' of possible classes.')
labels = [[None]*len(labels[0]) if is_label_none(label_per_sample) else [int(x) for x in label_per_sample]
for label_per_sample in labels]
elif any(not isinstance(x, (str, np.integer, int)) and not pd.isna(x) for x in labels):
raise DeepchecksValueError('label must be a Sequence of strings or ints (multiclass classification) '
'or a Sequence of Sequences of strings or ints (multilabel classification)')
else:
labels = [None if pd.isna(x) else str(x) for x in labels]
elif task_type == TaskType.TOKEN_CLASSIFICATION:
token_class_error = 'label must be a Sequence of Sequences of either strings or integers.'
if not is_sequence_not_str(labels):
raise DeepchecksValueError(token_class_error)
result = []
for idx, (tokens, label) in enumerate(zip(tokenized_text, labels)): # TODO: Runs on all labels, very costly
if is_label_none(label):
result.append([None]*len(tokens))
else:
if not is_sequence_not_str(label):
raise DeepchecksValueError(token_class_error + f' label at {idx} was of type {type(label)}')
if len(tokens) != len(label):
raise DeepchecksValueError(f'label must be the same length as tokenized_text. '
f'However, for sample index {idx} received token list of length '
f'{len(tokens)} and label list of length {len(label)}')
result.append([str(x) for x in label])
labels = result
return np.asarray(labels, dtype=object) | Validate and process label to accepted formats. |
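A hedged sketch of the multilabel classification path (labels are illustrative 0/1 indicator rows; `tokenized_text` is only consulted for token classification):
labels = [[0, 1, 0], [1, 0, 0]]
processed = validate_modify_label(labels, TaskType.TEXT_CLASSIFICATION,
                                  expected_size=2, tokenized_text=None)
# processed is a 2x3 numpy object array of ints, one indicator row per sample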
449 | import collections
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Set, Tuple, Type, cast
import numpy as np
import pandas as pd
from deepchecks.core.errors import DeepchecksValueError, ValidationError
from deepchecks.nlp.task_type import TaskType, TTextLabel
from deepchecks.utils.logger import get_logger
from deepchecks.utils.metrics import is_label_none
from deepchecks.utils.type_inference import infer_categorical_features, infer_numerical_features
from deepchecks.utils.validation import is_sequence_not_str
class DeepchecksValueError(DeepchecksBaseError):
"""Exception class that represent a fault parameter was passed to Deepchecks."""
pass
The provided code snippet includes necessary dependencies for implementing the `validate_length_and_type_numpy_array` function. Write a Python function `def validate_length_and_type_numpy_array(data: np.ndarray, data_name: str, expected_size: int)` to solve the following problem:
Validate length of numpy array and type.
Here is the function:
def validate_length_and_type_numpy_array(data: np.ndarray, data_name: str, expected_size: int):
"""Validate length of numpy array and type."""
if not isinstance(data, np.ndarray):
raise DeepchecksValueError(
f'{data_name} type {type(data)} is not supported, '
'must be a numpy array'
)
if len(data) != expected_size:
raise DeepchecksValueError(
f'received {data_name} with {len(data)} rows, '
f'expected {expected_size}'
) | Validate length of numpy array and type. |
450 | import collections
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Set, Tuple, Type, cast
import numpy as np
import pandas as pd
from deepchecks.core.errors import DeepchecksValueError, ValidationError
from deepchecks.nlp.task_type import TaskType, TTextLabel
from deepchecks.utils.logger import get_logger
from deepchecks.utils.metrics import is_label_none
from deepchecks.utils.type_inference import infer_categorical_features, infer_numerical_features
from deepchecks.utils.validation import is_sequence_not_str
class ColumnTypes(NamedTuple):
"""Utility data transfer object."""
categorical_columns: List[str]
numerical_columns: List[str]
class DeepchecksValueError(DeepchecksBaseError):
"""Exception class that represent a fault parameter was passed to Deepchecks."""
pass
def get_logger() -> logging.Logger:
"""Retutn the deepchecks logger."""
return _logger
def infer_numerical_features(df: pd.DataFrame) -> t.List[Hashable]:
"""Infers which features are numerical.
Parameters
----------
df : pd.DataFrame
dataframe for which to infer numerical features
Returns
-------
List[Hashable]
list of numerical features
"""
columns = df.columns
numerical_columns = []
for col in columns:
col_data = df[col]
if col_data.dtype == 'object':
# object might still be only floats, so we reset the dtype
col_data = pd.Series(col_data.to_list())
if is_numeric_dtype(col_data):
numerical_columns.append(col)
return numerical_columns
def infer_categorical_features(
df: pd.DataFrame,
max_categorical_ratio: float = 0.01,
max_categories: int = None,
columns: t.Optional[t.List[Hashable]] = None,
) -> t.List[Hashable]:
"""Infers which features are categorical by checking types and number of unique values.
Parameters
----------
df : pd.DataFrame
dataframe for which to infer categorical features
max_categorical_ratio : float , default: 0.01
max_categories : int , default: None
columns : t.Optional[t.List[Hashable]] , default: None
Returns
-------
List[Hashable]
list of categorical features
"""
categorical_dtypes = df.select_dtypes(include='category')
if len(categorical_dtypes.columns) > 0:
return list(categorical_dtypes.columns)
if columns is not None:
dataframe_columns = ensure_hashable_or_mutable_sequence(columns)
else:
dataframe_columns = df.columns
if max_categories is None:
return [
column
for column in dataframe_columns
if is_categorical(
t.cast(pd.Series, df[column]),
max_categorical_ratio)]
else:
return [
column
for column in dataframe_columns
if is_categorical(
t.cast(pd.Series, df[column]),
max_categorical_ratio,
max_categories,
max_categories,
max_categories)]
The provided code snippet includes necessary dependencies for implementing the `validate_length_and_calculate_column_types` function. Write a Python function `def validate_length_and_calculate_column_types( data_table: pd.DataFrame, data_table_name: str, expected_size: int, categorical_columns: Optional[Sequence[str]] = None ) -> ColumnTypes` to solve the following problem:
Validate length of data table and calculate column types.
Here is the function:
def validate_length_and_calculate_column_types(
data_table: pd.DataFrame,
data_table_name: str,
expected_size: int,
categorical_columns: Optional[Sequence[str]] = None
) -> ColumnTypes:
"""Validate length of data table and calculate column types."""
if not isinstance(data_table, pd.DataFrame):
raise DeepchecksValueError(
f'{data_table_name} type {type(data_table)} is not supported, '
'must be a pandas DataFrame'
)
if len(data_table) != expected_size:
raise DeepchecksValueError(
f'received {data_table_name} with {len(data_table)} rows, '
f'expected {expected_size}'
)
if categorical_columns is None: # TODO: Add tests
categorical_columns = infer_categorical_features(data_table)
get_logger().info(
'%s types were not provided, auto inferred as categorical are:\n%s',
data_table_name,
categorical_columns
)
else:
difference = set(categorical_columns).difference(data_table.columns)
if len(difference) != 0:
raise DeepchecksValueError(
f'The following columns do not exist in {data_table_name} - {list(difference)}'
)
other_features = set(data_table.columns) - set(categorical_columns)
numeric_features = infer_numerical_features(data_table[list(other_features)])
return ColumnTypes(
categorical_columns=list(categorical_columns),
numerical_columns=list(numeric_features)
) | Validate length of data table and calculate column types. |
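A minimal sketch with a hypothetical three-row metadata table:
metadata = pd.DataFrame({'source': ['web', 'api', 'web'], 'length': [120, 87, 64]})
column_types = validate_length_and_calculate_column_types(
    metadata, 'metadata', expected_size=3, categorical_columns=['source'])
# ColumnTypes(categorical_columns=['source'], numerical_columns=['length'])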
451 | import collections
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Set, Tuple, Type, cast
import numpy as np
import pandas as pd
from deepchecks.core.errors import DeepchecksValueError, ValidationError
from deepchecks.nlp.task_type import TaskType, TTextLabel
from deepchecks.utils.logger import get_logger
from deepchecks.utils.metrics import is_label_none
from deepchecks.utils.type_inference import infer_categorical_features, infer_numerical_features
from deepchecks.utils.validation import is_sequence_not_str
class DataframesDifference(NamedTuple):
"""Facility type for the 'compare_dataframes' function.
Parameters
==========
only_in_train: Tuple[str, ...]
set of columns present only in train dataframe.
only_in_test: Tuple[str, ...]
set of columns present only in test dataframe.
types_mismatch: Tuple[str, ...]
set of columns that are present in both dataframes
but have different types.
"""
only_in_train: Tuple[str, ...]
only_in_test: Tuple[str, ...]
types_mismatch: Tuple[str, ...]
class DataframesComparison(NamedTuple):
"""Facility type for the 'compare_dataframes' function.
Parameters
==========
common: Dict[str, str]
set of columns common for both dataframes.
difference: Optional[DataframesDifference]
difference between two dataframes.
"""
common: Dict[str, str]
difference: Optional[DataframesDifference]
The provided code snippet includes necessary dependencies for implementing the `compare_dataframes` function. Write a Python function `def compare_dataframes( train: pd.DataFrame, test: pd.DataFrame, train_categorical_columns: Optional[Sequence[str]] = None, test_categorical_columns: Optional[Sequence[str]] = None ) -> DataframesComparison` to solve the following problem:
Compare two dataframes and return a difference.
Here is the function:
def compare_dataframes(
train: pd.DataFrame,
test: pd.DataFrame,
train_categorical_columns: Optional[Sequence[str]] = None,
test_categorical_columns: Optional[Sequence[str]] = None
) -> DataframesComparison:
"""Compare two dataframes and return a difference."""
train_categorical_columns = train_categorical_columns or []
test_categorical_columns = test_categorical_columns or []
train_columns = cast(Set[str], set(train.columns))
test_columns = cast(Set[str], set(test.columns))
only_in_train = train_columns.difference(test_columns)
only_in_test = test_columns.difference(train_columns)
common_columns = train_columns.intersection(test_columns)
types_mismatch: Set[str] = set()
for column in common_columns:
        is_cat_in_both_dataframes = (
            column in train_categorical_columns
            and column in test_categorical_columns
        )
        is_numerical_in_both_dataframes = (
            column not in train_categorical_columns
            and column not in test_categorical_columns
        )
        if is_cat_in_both_dataframes or is_numerical_in_both_dataframes:
            continue
        types_mismatch.add(column)
common = {
column: (
'categorical'
if column in train_categorical_columns
else 'numerical'
)
for column in common_columns.difference(types_mismatch)
}
if only_in_train or only_in_test or types_mismatch:
difference = DataframesDifference(
only_in_train=tuple(only_in_train),
only_in_test=tuple(only_in_test),
types_mismatch=tuple(types_mismatch),
)
else:
difference = None
return DataframesComparison(common, difference) | Compare two dataframes and return a difference. |
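A hedged sketch comparing two hypothetical metadata frames:
train_meta = pd.DataFrame({'source': ['web', 'api'], 'length': [10, 20]})
test_meta = pd.DataFrame({'source': ['web', 'web'], 'score': [0.4, 0.9]})
comparison = compare_dataframes(train_meta, test_meta,
                                train_categorical_columns=['source'],
                                test_categorical_columns=['source'])
# comparison.common -> {'source': 'categorical'}
# comparison.difference -> only_in_train=('length',), only_in_test=('score',), types_mismatch=()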
452 | import collections
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Set, Tuple, Type, cast
import numpy as np
import pandas as pd
from deepchecks.core.errors import DeepchecksValueError, ValidationError
from deepchecks.nlp.task_type import TaskType, TTextLabel
from deepchecks.utils.logger import get_logger
from deepchecks.utils.metrics import is_label_none
from deepchecks.utils.type_inference import infer_categorical_features, infer_numerical_features
from deepchecks.utils.validation import is_sequence_not_str
class ValidationError(DeepchecksBaseError):
    """Represents more specific case of the ValueError (DeepchecksValueError)."""
    pass
def is_sequence_not_str(value) -> TypeGuard[t.Sequence[t.Any]]:
    """Check if value is a non str sequence."""
    return (
        not isinstance(value, (bytes, str, bytearray))
        and isinstance(value, (t.Sequence, pd.Series, np.ndarray))
    )
def _validate_text_classification(
*,
dataset: 'TextData',
predictions: Any = None,
probabilities: Any = None,
n_of_classes: Optional[int] = None,
eps: float = 1e-3
) -> Tuple[
Optional[np.ndarray], # predictions
Optional[np.ndarray], # probabilities
]:
if predictions is not None:
format_error_message = (
f'Check requires predictions for the "{dataset.name}" dataset '
'to be of a type sequence[str] | sequence[int]'
)
if not is_sequence_not_str(predictions):
raise ValidationError(format_error_message)
if len(predictions) != dataset.n_samples:
raise ValidationError(
f'Check requires predictions for the "{dataset.name}" dataset '
f'to have {dataset.n_samples} rows, same as dataset'
)
try:
predictions = np.array(predictions, dtype='object')
except ValueError as e:
raise ValidationError(
'Failed to cast predictions to a numpy array. '
f'{format_error_message}'
) from e
else:
if predictions.ndim == 2 and predictions.shape[1] == 1:
predictions = predictions[:, 0]
if predictions.ndim != 1:
raise ValidationError(format_error_message)
predictions = np.array([
str(it) if it is not None else None
for it in predictions
], dtype='object')
if probabilities is not None:
format_error_message = (
f'Check requires classification probabilities for the "{dataset.name}" '
'dataset to be of a type sequence[sequence[float]] that can be cast to '
'a 2D numpy array of shape (n_samples, n_classes)'
)
if len(probabilities) != dataset.n_samples:
raise ValidationError(
f'Check requires classification probabilities for the "{dataset.name}" '
f'dataset to have {dataset.n_samples} rows, same as dataset'
)
try:
probabilities = np.array(probabilities, dtype='float')
except ValueError as e:
raise ValidationError(
'Failed to cast probabilities to a numpy array. '
f'{format_error_message}'
) from e
else:
if len(probabilities.shape) != 2:
raise ValidationError(format_error_message)
if n_of_classes is not None and probabilities.shape[1] != n_of_classes:
raise ValidationError(
f'Check requires classification probabilities for the "{dataset.name}" dataset '
f'to have {n_of_classes} columns, same as the number of classes'
)
if any(abs(probabilities.sum(axis=1) - 1) > eps):
# TODO: better message
raise ValidationError(
f'Check requires classification probabilities for the "{dataset.name}" '
f'dataset to be probabilities and sum to 1 for each row'
)
return predictions, probabilities | null |
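A hedged sketch (assumes a hypothetical two-sample binary TextData named `dataset`); per-row probabilities must sum to 1:
preds, probas = _validate_text_classification(
    dataset=dataset,                           # hypothetical TextData with n_samples == 2
    predictions=['positive', 'negative'],
    probabilities=[[0.9, 0.1], [0.2, 0.8]],
    n_of_classes=2,
)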
453 | import collections
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Set, Tuple, Type, cast
import numpy as np
import pandas as pd
from deepchecks.core.errors import DeepchecksValueError, ValidationError
from deepchecks.nlp.task_type import TaskType, TTextLabel
from deepchecks.utils.logger import get_logger
from deepchecks.utils.metrics import is_label_none
from deepchecks.utils.type_inference import infer_categorical_features, infer_numerical_features
from deepchecks.utils.validation import is_sequence_not_str
class ValidationError(DeepchecksBaseError):
"""Represents more specific case of the ValueError (DeepchecksValueError)."""
pass
def is_sequence_not_str(value) -> TypeGuard[t.Sequence[t.Any]]:
"""Check if value is a non str sequence."""
return (
not isinstance(value, (bytes, str, bytearray))
and isinstance(value, (t.Sequence, pd.Series, np.ndarray))
)
def _validate_multilabel(
*,
dataset: 'TextData',
predictions: Any = None,
probabilities: Any = None,
n_of_classes: Optional[int] = None,
) -> Tuple[
Optional[np.ndarray], # predictions
Optional[np.ndarray], # probabilities
]:
if predictions is not None:
format_error_message = (
'Check requires multi-label classification predictions for '
f'the "{dataset.name}" dataset to be of a type sequence[sequence[int]] '
'that can be cast to a 2D numpy array of a shape (n_samples, n_classes)'
)
if not is_sequence_not_str(predictions):
raise ValidationError(format_error_message)
if len(predictions) != dataset.n_samples:
raise ValidationError(
'Check requires multi-label classification predictions '
f'for the "{dataset.name}" dataset to have {dataset.n_samples} rows, '
'same as dataset'
)
try:
predictions = np.array(predictions).astype(float)
except ValueError as e:
raise ValidationError(
'Failed to cast multi-label predictions to a numpy array. '
f'{format_error_message}'
) from e
else:
if predictions.ndim != 2:
raise ValidationError(format_error_message)
if n_of_classes is not None and predictions.shape[1] != n_of_classes:
raise ValidationError(
'Check requires multi-label classification predictions '
f'for the "{dataset.name}" dataset to have {n_of_classes} columns, '
'same as the number of classes'
)
if not np.array_equal(predictions, predictions.astype(bool)):
raise ValidationError(
'Check requires multi-label classification predictions '
f'for the "{dataset.name}" dataset to be either 0 or 1'
)
if probabilities is not None:
format_error_message = (
'Check requires multi-label classification probabilities '
f'for the "{dataset.name}" to be of a type sequence[sequences[float]] '
'that can be cast to a 2D numpy array of a shape (n_samples, n_classes). '
'Each label probability value must lay between 0 and 1'
)
if len(probabilities) != dataset.n_samples:
raise ValidationError(
'Check requires multi-label classification probabilities '
f'for the "{dataset.name}" dataset to have {dataset.n_samples} rows, '
'same as dataset'
)
try:
probabilities = np.array(probabilities, dtype='float')
except ValueError as e:
raise ValidationError(
'Failed to cast multi-label probabilities to a numpy '
f'array. {format_error_message}'
) from e
else:
if probabilities.ndim != 2:
raise ValidationError(format_error_message)
if n_of_classes is not None and probabilities.shape[1] != n_of_classes:
raise ValidationError(
f'Check requires multi-label classification probabilities '
f'for the "{dataset.name}" dataset to have {n_of_classes} columns, '
'same as the number of classes'
)
if (probabilities > 1).any() or (probabilities < 0).any():
# TODO: better message
raise ValidationError(format_error_message)
return predictions, probabilities | null |
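A hedged sketch (assumes a hypothetical three-sample, two-class TextData named `dataset`); predictions are 0/1 indicators and each probability lies in [0, 1] with no per-row sum constraint:
preds, probas = _validate_multilabel(
    dataset=dataset,                                     # hypothetical TextData with n_samples == 3
    predictions=[[0, 1], [1, 1], [0, 0]],
    probabilities=[[0.1, 0.9], [0.8, 0.7], [0.2, 0.3]],
    n_of_classes=2,
)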
454 | import collections
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Set, Tuple, Type, cast
import numpy as np
import pandas as pd
from deepchecks.core.errors import DeepchecksValueError, ValidationError
from deepchecks.nlp.task_type import TaskType, TTextLabel
from deepchecks.utils.logger import get_logger
from deepchecks.utils.metrics import is_label_none
from deepchecks.utils.type_inference import infer_categorical_features, infer_numerical_features
from deepchecks.utils.validation import is_sequence_not_str
def _count_types(sequence: Sequence[Any]) -> Dict[Type, int]:
counter = collections.defaultdict(int)
for it in sequence:
counter[type(it)] += 1
return counter
class ValidationError(DeepchecksBaseError):
"""Represents more specific case of the ValueError (DeepchecksValueError)."""
pass
def is_sequence_not_str(value) -> TypeGuard[t.Sequence[t.Any]]:
"""Check if value is a non str sequence."""
return (
not isinstance(value, (bytes, str, bytearray))
and isinstance(value, (t.Sequence, pd.Series, np.ndarray))
)
def _validate_token_classification(
*,
dataset: 'TextData',
predictions: Any = None,
probabilities: Any = None,
):
if probabilities is not None:
raise ValidationError(
'For token classification probabilities are not supported'
)
if predictions is not None:
format_error_message = (
'Check requires token-classification predictions for '
f'the "{dataset.name}" dataset to be of a type '
'sequence[sequence[str]] or sequence[sequence[int]]'
)
if not is_sequence_not_str(predictions):
raise ValidationError(format_error_message)
if len(predictions) != dataset.n_samples:
raise ValidationError(
'Check requires token-classification predictions for '
f'the "{dataset.name}" dataset to have {dataset.n_samples} rows, '
'same as dataset'
)
for idx, sample_predictions in enumerate(predictions):
if not is_sequence_not_str(sample_predictions):
raise ValidationError(format_error_message)
predictions_types_counter = _count_types(sample_predictions)
criterias = (str in predictions_types_counter, int in predictions_types_counter)
if all(criterias) or not any(criterias):
raise ValidationError(format_error_message)
tokenized_text = dataset.tokenized_text
if len(sample_predictions) != len(tokenized_text[idx]):
raise ValidationError(
'Check requires token-classification predictions for '
f'the "{dataset.name}" dataset to have the same number of tokens '
'as the input text'
) | null |
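A hedged sketch (assumes a hypothetical two-sample TextData named `token_dataset` whose tokenized_text is [['Mary', 'lives', 'in', 'Paris'], ['Bob']]); predictions must align token-for-token with the input text:
_validate_token_classification(
    dataset=token_dataset,                                  # hypothetical TextData
    predictions=[['B-PER', 'O', 'O', 'B-LOC'], ['B-PER']],
)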
455 | import typing as t
from collections.abc import Sequence
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from seqeval.scheme import Token
from sklearn.metrics import make_scorer
from deepchecks.core.errors import DeepchecksValueError
def get_scorer_dict(
suffix: bool = False,
mode: t.Optional[str] = None,
scheme: t.Optional[t.Type[Token]] = None,
) -> t.Dict[str, t.Callable[[t.List[str], t.List[str]], float]]:
"""Return a dict of scorers for token classification.
Parameters
----------
mode: str, [None (default), `strict`].
if ``None``, the score is compatible with conlleval.pl. Otherwise,
the score is calculated strictly.
scheme: Token, [IOB2, IOE2, IOBES]
suffix: bool, False by default.
Returns
-------
A dict of scorers.
"""
common_kwargs = {
'mode': mode,
'scheme': scheme,
'suffix': suffix,
'zero_division': 0,
}
return {
'accuracy': make_token_scorer(accuracy_score, **common_kwargs),
'f1_per_class': make_token_scorer(f1_score, **common_kwargs, average=None),
'f1_macro': make_token_scorer(f1_score, **common_kwargs, average='macro'),
'f1_micro': make_token_scorer(f1_score, **common_kwargs, average='micro'),
'precision_per_class': make_token_scorer(precision_score, **common_kwargs, average=None),
'precision_macro': make_token_scorer(precision_score, **common_kwargs, average='macro'),
'precision_micro': make_token_scorer(precision_score, **common_kwargs, average='micro'),
'recall_per_class': make_token_scorer(recall_score, **common_kwargs, average=None),
'recall_macro': make_token_scorer(recall_score, **common_kwargs, average='macro'),
'recall_micro': make_token_scorer(recall_score, **common_kwargs, average='micro'),
}
class DeepchecksValueError(DeepchecksBaseError):
"""Exception class that represent a fault parameter was passed to Deepchecks."""
pass
The provided code snippet includes necessary dependencies for implementing the `validate_scorers` function. Write a Python function `def validate_scorers(scorers: t.List[str])` to solve the following problem:
Validate the given scorer list.
Here is the function:
def validate_scorers(scorers: t.List[str]):
"""Validate the given scorer list."""
scoring_dict = get_scorer_dict()
if not isinstance(scorers, Sequence):
raise DeepchecksValueError(f'Scorers must be a Sequence, got {type(scorers)}')
for name in scorers:
if not isinstance(name, str):
# TODO: support custom scorers
raise DeepchecksValueError(
f'Scorers must be a Sequence of strings, got {type(name)}'
)
if name not in scoring_dict:
raise DeepchecksValueError(
'Scorers must be a list of names of existing token classification metrics, '
f'which is {scoring_dict.keys()}, got {scorers}'
) | Validate the given scorer list. |
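A minimal sketch (assumes the full token-classification metric module, including the `make_token_scorer` helper referenced above):
validate_scorers(['f1_macro', 'recall_macro'])  # passes silently
validate_scorers(['roc_auc'])                   # raises DeepchecksValueError: not a token-classification metric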
456 | import typing as t
from collections.abc import Sequence
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from seqeval.scheme import Token
from sklearn.metrics import make_scorer
from deepchecks.core.errors import DeepchecksValueError
DEFAULT_AVG_SCORER_NAMES = ('f1_macro', 'recall_macro', 'precision_macro')
DEFAULT_PER_CLASS_SCORER_NAMES = ('f1_per_class', 'precision_per_class', 'recall_per_class')
The provided code snippet includes necessary dependencies for implementing the `get_default_token_scorers` function. Write a Python function `def get_default_token_scorers(use_avg_defaults=True) -> t.List[str]` to solve the following problem:
Return the default scorers for token classification.
Here is the function:
def get_default_token_scorers(use_avg_defaults=True) -> t.List[str]:
"""Return the default scorers for token classification."""
return list(
DEFAULT_AVG_SCORER_NAMES
if use_avg_defaults
else DEFAULT_PER_CLASS_SCORER_NAMES
) | Return the default scorers for token classification. |
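The returned names follow directly from the constants above:
get_default_token_scorers()                        # ['f1_macro', 'recall_macro', 'precision_macro']
get_default_token_scorers(use_avg_defaults=False)  # ['f1_per_class', 'precision_per_class', 'recall_per_class']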
457 | import typing as t
import numpy as np
from deepchecks.nlp.task_type import TaskType
from deepchecks.nlp.text_data import TextData
from deepchecks.tabular.metric_utils import DeepcheckScorer
from deepchecks.tabular.metric_utils.scorers import _transform_to_multi_label_format
from deepchecks.utils.metrics import is_label_none
from deepchecks.utils.typing import ClassificationModel
The provided code snippet includes necessary dependencies for implementing the `init_validate_scorers` function. Write a Python function `def init_validate_scorers(scorers: t.Union[t.Mapping[str, t.Union[str, t.Callable]], t.List[str]], model_classes: t.Optional[t.List], observed_classes: t.Optional[t.List]) -> t.List[DeepcheckScorer]` to solve the following problem:
Initialize scorers and return all of them as deepchecks scorers. Parameters ---------- scorers : Mapping[str, Union[str, Callable]] dict of scorers names to scorer sklearn_name/function or a list without a name model_classes : t.Optional[t.List] possible classes output for model. None for regression tasks. observed_classes : t.Optional[t.List] observed classes from labels and predictions. None for regression tasks. Returns ------- t.List[DeepcheckScorer] A list of initialized scorers
Here is the function:
def init_validate_scorers(scorers: t.Union[t.Mapping[str, t.Union[str, t.Callable]], t.List[str]],
model_classes: t.Optional[t.List],
observed_classes: t.Optional[t.List]) -> t.List[DeepcheckScorer]:
"""Initialize scorers and return all of them as deepchecks scorers.
Parameters
----------
scorers : Mapping[str, Union[str, Callable]]
dict of scorers names to scorer sklearn_name/function or a list without a name
model_classes : t.Optional[t.List]
possible classes output for model. None for regression tasks.
observed_classes : t.Optional[t.List]
observed classes from labels and predictions. None for regression tasks.
Returns
-------
t.List[DeepcheckScorer]
A list of initialized scorers
"""
if isinstance(scorers, t.Mapping):
scorers: t.List[DeepcheckScorer] = [DeepcheckScorer(scorer, model_classes, observed_classes, name)
for name, scorer in scorers.items()]
else:
scorers: t.List[DeepcheckScorer] = [DeepcheckScorer(scorer, model_classes, observed_classes)
for scorer in scorers]
return scorers | Initialize scorers and return all of them as deepchecks scorers. Parameters ---------- scorers : Mapping[str, Union[str, Callable]] dict of scorers names to scorer sklearn_name/function or a list without a name model_classes : t.Optional[t.List] possible classes output for model. None for regression tasks. observed_classes : t.Optional[t.List] observed classes from labels and predictions. None for regression tasks. Returns ------- t.List[DeepcheckScorer] A list of initialized scorers |
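A hedged sketch of the two accepted input shapes, assuming DeepcheckScorer can be built from sklearn scorer names as the calls above suggest (the class names here are invented for illustration):
# Mapping form: display name -> sklearn scorer name (or callable).
named = init_validate_scorers({'acc': 'accuracy'},
                              model_classes=['neg', 'pos'], observed_classes=['neg', 'pos'])
# List form: the scorer name doubles as the display name.
unnamed = init_validate_scorers(['accuracy', 'f1_macro'],
                                model_classes=['neg', 'pos'], observed_classes=['neg', 'pos'])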
458 | import typing as t
import numpy as np
from deepchecks.nlp.task_type import TaskType
from deepchecks.nlp.text_data import TextData
from deepchecks.tabular.metric_utils import DeepcheckScorer
from deepchecks.tabular.metric_utils.scorers import _transform_to_multi_label_format
from deepchecks.utils.metrics import is_label_none
from deepchecks.utils.typing import ClassificationModel
class TaskType(Enum):
"""Enum containing supported task types."""
TEXT_CLASSIFICATION = 'text_classification'
TOKEN_CLASSIFICATION = 'token_classification'
OTHER = 'other'
class TextData:
"""
TextData wraps together the raw text data and the labels for the nlp task.
The TextData class contains metadata and methods intended for easily accessing
metadata relevant for the training or validating of ML models.
Parameters
----------
raw_text : t.Sequence[str], default: None
The raw text data, a sequence of strings representing the raw text of each sample.
If not given, tokenized_text must be given, and raw_text will be created from it by joining the tokens with
spaces.
tokenized_text : t.Sequence[t.Sequence[str]], default: None
The tokenized text data, a sequence of sequences of strings representing the tokenized text of each sample.
Only relevant for task_type 'token_classification'.
If not given, raw_text must be given, and tokenized_text will be created from it by splitting the text by
spaces.
label : t.Optional[TTextLabel], default: None
The label for the text data. Can be either a text_classification label or a token_classification label.
If None, the label is not set.
- text_classification label - For text classification the accepted label format differs between multilabel and
single label cases. For single label data, the label should be passed as a sequence of labels, with one entry
per sample that can be either a string or an integer. For multilabel data, the label should be passed as a
sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of
the i-th label in that sample.
- token_classification label - For token classification the accepted label format is the IOB format or similar
to it. The Label must be a sequence of sequences of strings or integers, with each sequence corresponding to
a sample in the tokenized text, and exactly the length of the corresponding tokenized text.
task_type : str, default: None
The task type for the text data. Can be either 'text_classification' or 'token_classification'. Must be set if
label is provided.
name : t.Optional[str] , default: None
The name of the dataset. If None, the dataset name will be defined when running it within a check.
metadata : t.Optional[t.Union[pd.DataFrame, str]] , default: None
Metadata for the samples. Metadata must be given as a pandas DataFrame or a path to a pandas
DataFrame compatible csv file, with the rows representing each sample
and columns representing the different metadata columns. If None, no metadata is set.
The number of rows in the metadata DataFrame must be equal to the number of samples in the dataset, and the
order of the rows must be the same as the order of the samples in the dataset.
For more on metadata, see the `NLP Metadata Guide
<https://docs.deepchecks.com/stable/nlp/usage_guides/nlp_metadata.html>`_.
categorical_metadata : t.Optional[t.List[str]] , default: None
The names of the categorical metadata columns. If None, categorical metadata columns are automatically inferred.
Only relevant if metadata is not None.
properties : t.Optional[t.Union[pd.DataFrame, str]] , default: None
The text properties for the samples. Properties must be given as either a pandas DataFrame or a path to a pandas
DataFrame compatible csv file, with the rows representing each sample and columns representing the different
properties. If None, no properties are set.
The number of rows in the properties DataFrame must be equal to the number of samples in the dataset, and the
order of the rows must be the same as the order of the samples in the dataset.
In order to calculate the default properties, use the `TextData.calculate_builtin_properties` function after
the creation of the TextData object.
For more on properties, see the `NLP Properties Guide
<https://docs.deepchecks.com/stable/nlp/usage_guides/nlp_properties.html>`_.
categorical_properties : t.Optional[t.List[str]] , default: None
The names of the categorical properties columns. Should be given only for custom properties, not for
any of the built-in properties. If None, categorical properties columns are automatically inferred for custom
properties.
embeddings : t.Optional[Union[np.ndarray, pd.DataFrame, str]], default: None
The text embeddings for the samples. Embeddings must be given as a numpy array (or a path to an .npy
file containing a numpy array) of shape (N, E), where N is the number of samples in the TextData object and E
is the number of embeddings dimensions.
The numpy array must be in the same order as the samples in the TextData.
If None, no embeddings are set.
In order to use the built-in embeddings, use the `TextData.calculate_builtin_embeddings` function after
the creation of the TextData object.
For more on embeddings, see the :ref:`Text Embeddings Guide <nlp__embeddings_guide>`
"""
_text: np.ndarray
_label: TTextLabel
task_type: t.Optional[TaskType]
_tokenized_text: t.Optional[t.Sequence[t.Sequence[str]]] = None # Outer sequence is np array
name: t.Optional[str] = None
_embeddings: t.Optional[t.Union[pd.DataFrame, str]] = None
_metadata: t.Optional[t.Union[pd.DataFrame, str]] = None
_properties: t.Optional[t.Union[pd.DataFrame, str]] = None
_cat_properties: t.Optional[t.List[str]] = None
_cat_metadata: t.Optional[t.List[str]] = None
_numeric_metadata: t.Optional[t.List[str]] = None
_original_text_index: t.Optional[t.Sequence[int]] = None # Sequence is np array
def __init__(
self,
raw_text: t.Optional[t.Sequence[str]] = None,
tokenized_text: t.Optional[t.Sequence[t.Sequence[str]]] = None,
label: t.Optional[TTextLabel] = None,
task_type: t.Optional[str] = None,
name: t.Optional[str] = None,
embeddings: t.Optional[t.Union[pd.DataFrame, np.ndarray, str]] = None,
metadata: t.Optional[pd.DataFrame] = None,
categorical_metadata: t.Optional[t.List[str]] = None,
properties: t.Optional[pd.DataFrame] = None,
categorical_properties: t.Optional[t.List[str]] = None,
):
# Require explicitly setting task type if label is provided
if task_type in [None, 'other']:
if label is not None:
raise DeepchecksValueError('task_type must be set when label is provided')
self._task_type = TaskType.OTHER
elif task_type == 'text_classification':
self._task_type = TaskType.TEXT_CLASSIFICATION
elif task_type == 'token_classification':
if tokenized_text is None:
raise DeepchecksValueError('tokenized_text must be provided for token_classification task type')
validate_tokenized_text(tokenized_text)
modified = [[str(token) for token in tokens_per_sample] for tokens_per_sample in tokenized_text]
self._tokenized_text = np.asarray(modified, dtype=object)
self._task_type = TaskType.TOKEN_CLASSIFICATION
else:
raise DeepchecksNotSupportedError(f'task_type {task_type} is not supported, must be one of '
'text_classification, token_classification, other')
if raw_text is None:
if tokenized_text is None:
raise DeepchecksValueError('Either raw_text or tokenized_text must be provided')
self._text = np.asarray([' '.join(tokens) for tokens in tokenized_text]) # Revisit this decision
else:
validate_raw_text(raw_text)
self._text = np.asarray([str(x) for x in raw_text])
if tokenized_text is not None and len(raw_text) != len(tokenized_text):
raise DeepchecksValueError('raw_text and tokenized_text sequences must have the same length')
self._label = validate_modify_label(label, self._task_type, len(self), tokenized_text)
if name is not None and not isinstance(name, str):
raise DeepchecksNotSupportedError(f'name must be a string, got {type(name)}')
self.name = name
if metadata is not None:
self.set_metadata(metadata, categorical_metadata)
if properties is not None:
self.set_properties(properties, categorical_properties)
if embeddings is not None:
self.set_embeddings(embeddings)
# Used for display purposes
self._original_text_index = np.arange(len(self))
def is_multi_label_classification(self) -> bool:
"""Check if the dataset is multi-label."""
if self.task_type == TaskType.TEXT_CLASSIFICATION and self._label is not None:
return is_sequence_not_str(self._label[0])
return False
# pylint: disable=protected-access
def copy(self: TDataset, rows_to_use: t.Optional[t.Sequence[int]] = None) -> TDataset:
"""Create a copy of this Dataset with new data.
Parameters
----------
rows_to_use : t.Optional[t.List[int]] , default: None
The rows to use in the new copy. If None, the new copy will contain all the rows.
"""
cls = type(self)
# NOTE:
# Make sure we won't get the warning for setting class in the non multilabel case
with disable_deepchecks_logger():
if rows_to_use is None:
new_copy = cls(
raw_text=self._text,
tokenized_text=self._tokenized_text,
label=self._label,
task_type=self._task_type.value,
name=self.name
)
if self._metadata is not None:
new_copy.set_metadata(self._metadata, self._cat_metadata)
if self._properties is not None:
new_copy.set_properties(self._properties, self._cat_properties)
if self._embeddings is not None:
new_copy.set_embeddings(self._embeddings)
new_copy._original_text_index = self._original_text_index
return new_copy
if not isinstance(rows_to_use, t.Sequence) or any(not isinstance(x, Number) for x in rows_to_use):
raise DeepchecksValueError('rows_to_use must be a list of integers')
rows_to_use = sorted(rows_to_use)
new_copy = cls(
raw_text=self._text[rows_to_use],
tokenized_text=(
self._tokenized_text[rows_to_use]
if self._tokenized_text is not None
else None
),
label=self._label[rows_to_use] if self.has_label() else None,
task_type=self._task_type.value, name=self.name
)
if self._metadata is not None:
metadata = self._metadata.iloc[rows_to_use, :]
new_copy.set_metadata(metadata, self._cat_metadata)
if self._properties is not None:
properties = self._properties.iloc[rows_to_use, :]
new_copy.set_properties(properties, self._cat_properties)
if self._embeddings is not None:
embeddings = self._embeddings[rows_to_use]
new_copy.set_embeddings(embeddings)
new_copy._original_text_index = self._original_text_index[rows_to_use]
return new_copy
def sample(self: TDataset, n_samples: int, replace: bool = False, random_state: t.Optional[int] = None,
drop_na_label: bool = False) -> TDataset:
"""Create a copy of the dataset object, with the internal data being a sample of the original data.
Parameters
----------
n_samples : int
Number of samples to draw.
replace : bool, default: False
Whether to sample with replacement.
random_state : t.Optional[int] , default None
Random state.
drop_na_label : bool, default: False
            Whether to take the sample only from rows with an existing label.
Returns
-------
Dataset
instance of the Dataset with sampled internal dataframe.
"""
samples_to_choose_from = np.arange(len(self))
if drop_na_label and self.has_label():
samples_to_choose_from = samples_to_choose_from[[not is_label_none(x) for x in self._label]]
n_samples = min(n_samples, len(samples_to_choose_from))
np.random.seed(random_state)
sample_idx = np.random.choice(samples_to_choose_from, n_samples, replace=replace)
return self.copy(rows_to_use=sorted(sample_idx))
def __len__(self) -> int:
"""Return number of samples in the dataset."""
return self.n_samples
def n_samples(self) -> int:
"""Return number of samples in the dataset."""
if self._text is not None:
return len(self._text)
elif self._label is not None:
return len(self._label)
else:
return 0
def embeddings(self) -> pd.DataFrame:
"""Return the embeddings of for the dataset."""
if self._embeddings is None:
raise DeepchecksValueError(
                'Functionality requires embeddings, but the TextData object had none. To use this functionality, '
'use the set_embeddings method to set your own embeddings with a numpy.array or use '
'TextData.calculate_builtin_embeddings to add the default deepchecks embeddings.'
)
return self._embeddings
def calculate_builtin_embeddings(self, model: str = 'miniLM', file_path: str = 'embeddings.npy',
device: t.Optional[str] = None, long_sample_behaviour: str = 'average+warn',
open_ai_batch_size: int = 500):
"""Calculate the built-in embeddings of the dataset.
Parameters
----------
model : str, default: 'miniLM'
The model to use for calculating the embeddings. Possible values are:
'miniLM': using the miniLM model in the sentence-transformers library.
'open_ai': using the ADA model in the open_ai library. Requires an API key.
file_path : str, default: 'embeddings.npy'
The path to save the embeddings to.
device : str, default: None
The device to use for calculating the embeddings. If None, the default device will be used.
long_sample_behaviour : str, default 'average+warn'
How to handle long samples. Averaging is done as described in
https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
Currently, applies only to the 'open_ai' model, as the 'miniLM' model can handle long samples.
Options are:
- 'average+warn' (default): average the embeddings of the chunks and warn if the sample is too long.
- 'average': average the embeddings of the chunks.
- 'truncate': truncate the sample to the maximum length.
- 'raise': raise an error if the sample is too long.
- 'nan': return an embedding vector of nans for each sample that is too long.
open_ai_batch_size : int, default 500
The amount of samples to send to open ai in each batch. Reduce if getting errors from open ai.
"""
if self._embeddings is not None:
warnings.warn('Embeddings already exist, overwriting them', UserWarning)
self._embeddings = calculate_builtin_embeddings(text=self.text, model=model, file_path=file_path, device=device,
long_sample_behaviour=long_sample_behaviour,
open_ai_batch_size=open_ai_batch_size)
def set_embeddings(self, embeddings: np.ndarray, verbose: bool = True):
"""Set the embeddings of the dataset.
Parameters
----------
embeddings : pd.DataFrame
Embeddings to set.
verbose : bool, default: True
Whether to print information about the process.
"""
if self._embeddings is not None and verbose is True:
warnings.warn('Embeddings already exist, overwriting it', UserWarning)
if isinstance(embeddings, pd.DataFrame):
embeddings = embeddings.to_numpy()
if isinstance(embeddings, str):
embeddings = np.load(embeddings)
if embeddings is not None:
validate_length_and_type_numpy_array(embeddings, 'Embeddings', len(self))
self._embeddings = embeddings
def metadata(self) -> pd.DataFrame:
"""Return the metadata of for the dataset."""
if self._metadata is None:
raise DeepchecksValueError(
                'Functionality requires metadata, but the TextData object had none. '
'To use this functionality, use the '
'set_metadata method to set your own metadata with a pandas.DataFrame.'
)
return self._metadata
def categorical_metadata(self) -> t.List[str]:
"""Return categorical metadata column names."""
return self._cat_metadata
def numerical_metadata(self) -> t.List[str]:
"""Return numeric metadata column names."""
return self._numeric_metadata
def set_metadata(
self,
metadata: pd.DataFrame,
categorical_metadata: t.Optional[t.Sequence[str]] = None
):
"""Set the metadata of the dataset."""
if self._metadata is not None:
warnings.warn('Metadata already exist, overwriting it', UserWarning)
if isinstance(metadata, str):
metadata = pd.read_csv(metadata)
column_types = validate_length_and_calculate_column_types(
data_table=metadata,
data_table_name='Metadata',
expected_size=len(self),
categorical_columns=categorical_metadata
)
self._metadata = metadata.reset_index(drop=True)
self._cat_metadata = column_types.categorical_columns
self._numeric_metadata = column_types.numerical_columns
def calculate_builtin_properties(
self,
include_properties: t.Optional[t.List[str]] = None,
ignore_properties: t.Optional[t.List[str]] = None,
include_long_calculation_properties: bool = False,
ignore_non_english_samples_for_english_properties: bool = True,
device: t.Optional[str] = None,
models_storage: t.Union[pathlib.Path, str, None] = None,
batch_size: t.Optional[int] = 16,
cache_models: bool = False,
use_onnx_models: bool = True,
):
"""Calculate the default properties of the dataset.
Parameters
----------
include_properties : List[str], default None
The properties to calculate. If None, all default properties will be calculated. Cannot be used
together with ignore_properties parameter. Available properties are:
['Text Length', 'Average Word Length', 'Max Word Length',
'% Special Characters', '% Punctuation', 'Language',
'Sentiment', 'Subjectivity', 'Toxicity', 'Fluency', 'Formality', 'Lexical Density', 'Unique Noun Count',
            'Reading Ease', 'Average Words Per Sentence', 'URLs Count', 'Unique URLs Count', 'Email Address Count',
'Unique Email Address Count', 'Unique Syllables Count', 'Reading Time', 'Sentences Count',
'Average Syllable Length']
List of default properties are: ['Text Length', 'Average Word Length', 'Max Word Length',
'% Special Characters', '% Punctuation', 'Language', 'Sentiment', 'Subjectivity', 'Toxicity', 'Fluency',
'Formality', 'Lexical Density', 'Unique Noun Count', 'Reading Ease', 'Average Words Per Sentence']
To calculate all the default properties, the include_properties and ignore_properties parameters should
be None. If you pass either include_properties or ignore_properties then only the properties specified
in the list will be calculated or ignored.
Note that the properties ['Toxicity', 'Fluency', 'Formality', 'Language', 'Unique Noun Count'] may
take a long time to calculate. If include_long_calculation_properties is False, these properties will be
ignored, even if they are in the include_properties parameter.
ignore_properties : List[str], default None
The properties to ignore from the list of default properties. If None, no properties will be ignored and
all the default properties will be calculated. Cannot be used together with include_properties parameter.
include_long_calculation_properties : bool, default False
Whether to include properties that may take a long time to calculate. If False, these properties will be
ignored, unless they are specified in the include_properties parameter explicitly.
ignore_non_english_samples_for_english_properties : bool, default True
Whether to ignore samples that are not in English when calculating English properties. If False, samples
that are not in English will be calculated as well. This parameter is ignored when calculating non-English
properties.
English-Only properties WILL NOT work properly on non-English samples, and this parameter should be used
only when you are sure that all the samples are in English.
device : Optional[str], default None
The device to use for the calculation. If None, the default device will be used. For onnx based models it is
recommended to set device to None for optimized performance.
models_storage : Union[str, pathlib.Path, None], default None
A directory to store the models.
If not provided, models will be stored in `DEEPCHECKS_LIB_PATH/nlp/.nlp-models`.
Also, if a folder already contains relevant resources they are not re-downloaded.
        batch_size : int, default 16
The batch size.
cache_models : bool, default False
If True, will store the models in device RAM memory. This will speed up the calculation for future calls.
use_onnx_models : bool, default True
If True, will use onnx gpu optimized models for the calculation. Requires the optimum[onnxruntime-gpu]
library to be installed as well as the availability of GPU.
"""
if self._properties is not None:
warnings.warn('Properties already exist, overwriting them', UserWarning)
properties, properties_types = calculate_builtin_properties(
list(self.text),
include_properties=include_properties,
ignore_properties=ignore_properties,
include_long_calculation_properties=include_long_calculation_properties,
ignore_non_english_samples_for_english_properties=ignore_non_english_samples_for_english_properties,
device=device,
models_storage=models_storage,
batch_size=batch_size,
cache_models=cache_models,
use_onnx_models=use_onnx_models,
)
self._properties = pd.DataFrame(properties, index=self.get_original_text_indexes())
self._cat_properties = [k for k, v in properties_types.items() if v == 'categorical']
def set_properties(
self,
properties: pd.DataFrame,
categorical_properties: t.Optional[t.Sequence[str]] = None
):
"""Set the properties of the dataset."""
if self._properties is not None:
warnings.warn('Properties already exist, overwriting them', UserWarning)
if categorical_properties is not None:
categories_not_in_data = set(categorical_properties).difference(properties.columns.tolist())
if not len(categories_not_in_data) == 0:
raise DeepchecksValueError(
                    f'The following columns do not exist in Properties - {list(categories_not_in_data)}'
)
if isinstance(properties, str):
properties = pd.read_csv(properties)
builtin_property_types = get_builtin_properties_types()
property_names = properties.columns.tolist()
intersection = set(builtin_property_types.keys()).intersection(property_names)
# Get column types for intersection properties
builtin_categorical_properties = [x for x in intersection if builtin_property_types[x] == 'categorical']
# Get column types for user properties
user_properties = list(set(property_names).difference(builtin_property_types.keys()))
if categorical_properties is None:
user_categorical_properties = None
else:
user_categorical_properties = list(set(categorical_properties).intersection(user_properties))
if len(user_properties) != 0:
column_types = validate_length_and_calculate_column_types(
data_table=properties[user_properties],
data_table_name='Properties',
expected_size=len(self),
categorical_columns=user_categorical_properties
)
else:
column_types = ColumnTypes([], [])
# merge the two categorical properties list into one ColumnTypes object
all_cat_properties = column_types.categorical_columns + builtin_categorical_properties
column_types = ColumnTypes(
categorical_columns=all_cat_properties,
numerical_columns=list(set(property_names).difference(all_cat_properties))
)
self._properties = properties.reset_index(drop=True)
self._cat_properties = column_types.categorical_columns
def save_properties(self, path: str):
"""Save the dataset properties to csv.
Parameters
----------
path : str
Path to save the properties to.
"""
if self._properties is None:
raise DeepchecksNotSupportedError(
'TextData does not contain properties, add them by using '
'"calculate_builtin_properties" or "set_properties" functions'
)
self._properties.to_csv(path, index=False)
def properties(self) -> pd.DataFrame:
"""Return the properties of the dataset."""
if self._properties is None:
raise DeepchecksNotSupportedError(
                'Functionality requires properties, but the TextData object had none. To use this functionality, '
'use the set_properties method to set your own properties with a pandas.DataFrame or use '
'TextData.calculate_builtin_properties to add the default deepchecks properties.'
)
return self._properties
def categorical_properties(self) -> t.List[str]:
"""Return categorical properties names."""
return self._cat_properties
def numerical_properties(self) -> t.List[str]:
"""Return numerical properties names."""
if self._properties is not None:
return [prop for prop in self._properties.columns if prop not in self._cat_properties]
else:
return []
def task_type(self) -> t.Optional[TaskType]:
"""Return the task type.
Returns
-------
t.Optional[TaskType]
Task type
"""
return self._task_type
def text(self) -> t.Sequence[str]:
"""Return sequence of raw text samples.
Returns
-------
t.Sequence[str]
Sequence of raw text samples.
"""
return self._text
def tokenized_text(self) -> t.Sequence[t.Sequence[str]]:
"""Return sequence of tokenized text samples.
Returns
-------
t.Sequence[t.Sequence[str]]
Sequence of tokenized text samples.
"""
if self._tokenized_text is None:
raise DeepchecksValueError('Tokenized text is not set, provide it when initializing the TextData object '
'to run the requested functionalities')
return self._tokenized_text
def label(self) -> TTextLabel:
"""Return the label defined in the dataset.
Returns
-------
TTextLabel
"""
if not self.has_label():
raise DeepchecksValueError('Label is not set, provide it when initializing the TextData object '
'to run the requested functionalities')
return self._label
def label_for_display(self, model_classes: list = None) -> TTextLabel:
"""Return the label defined in the dataset in a format that can be displayed.
Parameters
----------
model_classes : list, default None
List of classes names to use for multi-label display. Only used if the dataset is multi-label.
Returns
-------
TTextLabel
"""
if self.is_multi_label_classification():
ret_labels = [np.argwhere(x == 1).flatten().tolist() for x in self.label]
if model_classes:
ret_labels = [[model_classes[i] for i in x] for x in ret_labels]
return ret_labels
else:
return self.label
def label_for_print(self, model_classes: list = None) -> t.List[str]:
"""Return the label defined in the dataset in a format that can be printed nicely.
Parameters
----------
model_classes : list, default None
List of classes names to use for multi-label display. Only used if the dataset is multi-label.
Returns
-------
List[str]
"""
label_for_display = self.label_for_display(model_classes)
return [break_to_lines_and_trim(str(x)) for x in label_for_display]
def has_label(self) -> bool:
"""Return True if label was set.
Returns
-------
bool
True if label was set.
"""
return self._label is not None
def get_original_text_indexes(self) -> t.Sequence[int]:
"""Return the original indexes of the text samples.
Returns
-------
t.Sequence[int]
Original indexes of the text samples.
"""
assert self._original_text_index is not None, 'Internal Error'
return self._original_text_index
def get_sample_at_original_index(self, index: int) -> str:
"""Return the text sample at the original index.
Parameters
----------
index : int
Original index of the text sample.
Returns
-------
str
Text sample at the original index.
"""
locations_in_array = np.where(self._original_text_index == index)
if len(locations_in_array) == 0:
raise DeepchecksValueError('Original text index is not in sampled TextData object')
elif len(locations_in_array) > 1:
raise DeepchecksValueError('Original text index is not unique in sampled TextData object')
return self._text[int(locations_in_array[0])]
def cast_to_dataset(cls, obj: t.Any) -> 'TextData':
"""Verify Dataset or transform to Dataset.
Function verifies that provided value is a non-empty instance of Dataset,
otherwise raises an exception, but if the 'cast' flag is set to True it will
also try to transform provided value to the Dataset instance.
Parameters
----------
obj
value to verify
Raises
------
DeepchecksValueError
if the provided value is not a TextData instance;
if the provided value cannot be transformed into Dataset instance;
"""
if not isinstance(obj, cls):
raise DeepchecksValueError(f'{obj} is not a {cls.__name__} instance')
return obj.copy()
def validate_textdata_compatibility(self, other_text_data: 'TextData') -> bool:
"""Verify that all provided datasets share same label name and task types.
Parameters
----------
other_text_data : TextData
The other dataset TextData object to compare with.
Returns
-------
bool
True if provided dataset share same label name and task types.
"""
assert other_text_data is not None
if self.task_type != other_text_data.task_type:
return False
return True
def head(self, n_samples: int = 5, model_classes: list = None) -> pd.DataFrame:
"""Return a copy of the dataset as a pandas Dataframe with the first n_samples samples.
Parameters
----------
n_samples : int, default 5
Number of samples to return.
model_classes : list, default None
List of classes names to use for multi-label display. Only used if the dataset is multi-label.
Returns
-------
pd.DataFrame
A copy of the dataset as a pandas Dataframe with the first n_samples samples.
"""
if n_samples > len(self):
n_samples = len(self) - 1
result = pd.DataFrame({'text': self.text[:n_samples]}, index=self.get_original_text_indexes()[:n_samples])
if self.has_label():
result['label'] = self.label_for_display(model_classes=model_classes)[:n_samples]
if self._tokenized_text is not None:
result['tokenized_text'] = self.tokenized_text[:n_samples]
if self._metadata is not None:
result = result.join(self.metadata.loc[result.index])
return result
def len_when_sampled(self, n_samples: t.Optional[int]):
"""Return number of samples in the sampled dataframe this dataset is sampled with n_samples samples."""
if n_samples is None:
return self.n_samples
return min(self.n_samples, n_samples)
def is_sampled(self, n_samples: t.Optional[int]):
"""Return True if the dataset number of samples will decrease when sampled with n_samples samples."""
if n_samples is None:
return False
return self.n_samples > n_samples
def describe(self, n_properties_to_show: t.Optional[int] = 4, properties_to_show: t.Optional[t.List[str]] = None,
max_num_labels_to_show: t.Optional[int] = 5, model_classes: t.Optional[t.List[str]] = None):
"""Provide holistic view of the data.
Generates the following plots:
1. Label distribution
2. Statistics about the data such as number of samples, annotation ratio, list of metadata columns, list of
text properties and so on.
3. Property distribution for the text properties defined either by n_properties_to_show or properties_to_show
parameter.
Parameters
----------
n_properties_to_show : int, default: 4
Number of properties to consider for generating property distribution graphs. If properties_to_show
is provided, this value is ignored.
properties_to_show : List[str], default: None
List of property names to consider for generating property distribution graphs. If None, all the
properties are considered.
max_num_labels_to_show : int, default: 5
The threshold to display the maximum number of labels on the label distribution pie chart and
display rest of the labels under "Others" category.
model_classes : Optional[List[str]], default: None
List of classes names to use for multi-label display. Only used if the dataset is multi-label.
Returns
-------
Displays the Plotly Figure.
"""
prop_names = []
all_properties_data = pd.DataFrame()
if self._properties is None and properties_to_show is not None:
raise DeepchecksValueError('No properties exist!')
elif self._properties is not None:
if properties_to_show is not None:
prop_names = [prop for prop in properties_to_show if prop in self.properties.columns]
if len(prop_names) != len(properties_to_show):
raise DeepchecksValueError(f'{set(properties_to_show) - set(prop_names)} '
                                           'properties do not exist in the TextData object')
else:
prop_names = list(self.properties.columns)[:n_properties_to_show]
all_properties_data = self.properties[prop_names]
fig = text_data_describe_plot(properties=all_properties_data, n_samples=self.n_samples,
is_multi_label=self.is_multi_label_classification(), task_type=self.task_type,
categorical_metadata=self.categorical_metadata,
numerical_metadata=self.numerical_metadata,
categorical_properties=self.categorical_properties,
numerical_properties=self.numerical_properties, label=self._label,
model_classes=model_classes,
max_num_labels_to_show=max_num_labels_to_show)
return fig
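To ground the class above, a minimal construction sketch using the installed deepchecks.nlp.TextData (note that this excerpt elides the @property decorators the library applies to accessors such as n_samples and label; the sample texts and labels are invented):
from deepchecks.nlp import TextData
texts = ['great product', 'terrible support', 'works as expected']
labels = ['positive', 'negative', 'positive']
dataset = TextData(raw_text=texts, label=labels, task_type='text_classification')
print(dataset.n_samples)   # 3
print(dataset.head(2))     # first two samples as a DataFrame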
def _transform_to_multi_label_format(y: np.ndarray, classes):
# Some classifiers like catboost might return shape like (n_rows, 1), therefore squeezing the array.
y = np.squeeze(y) if y.ndim > 1 else y
if y.ndim == 1:
kwargs = {'sparse_output': False} if version.parse(scikit_version) >= version.parse('1.2') \
else {'sparse': False}
ohe = OneHotEncoder(handle_unknown='ignore', **kwargs) # pylint: disable=unexpected-keyword-arg
ohe.fit(np.array(classes).reshape(-1, 1))
return ohe.transform(y.reshape(-1, 1))
# If after squeeze there are still 2 dimensions, then it must have column for each model class.
elif y.ndim == 2 and y.shape[1] == len(classes):
return y
else:
raise errors.DeepchecksValueError(f'got y with unworkable shape: {y.shape}. {SUPPORTED_MODELS_DOCLINK}')
def is_label_none(label):
"""Check if label (single label of a sample) is None."""
result = pd.isnull(label)
if isinstance(result, bool):
return result
return any(result)
class ClassificationModel(BasicModel, Protocol):
"""Traits of a classification model that are used by deepchecks."""
def predict_proba(self, X) -> List[Hashable]:
"""Predict probabilities on given X."""
...
The provided code snippet includes necessary dependencies for implementing the `infer_on_text_data` function. Write a Python function `def infer_on_text_data(scorer: DeepcheckScorer, model: ClassificationModel, data: TextData, drop_na: bool = True)` to solve the following problem:
Infer using DeepcheckScorer on NLP TextData using an NLP context _DummyModel.
Here is the function:
def infer_on_text_data(scorer: DeepcheckScorer, model: ClassificationModel, data: TextData, drop_na: bool = True):
"""Infer using DeepcheckScorer on NLP TextData using an NLP context _DummyModel."""
y_pred = model.predict(data)
y_true = data.label
if drop_na:
idx_to_keep = [not(is_label_none(pred) or is_label_none(label)) for pred, label in zip(y_pred, y_true)]
y_pred = np.asarray(y_pred, dtype='object')[idx_to_keep]
y_true = y_true[idx_to_keep]
if data.task_type == TaskType.TEXT_CLASSIFICATION:
y_pred = _transform_to_multi_label_format(y_pred, scorer.model_classes).astype(int)
y_true = _transform_to_multi_label_format(y_true, scorer.model_classes).astype(int)
if hasattr(model, 'predict_proba'):
y_proba = model.predict_proba(data)
if drop_na and y_proba is not None:
y_proba = np.asarray(y_proba, 'object')[idx_to_keep].astype(float)
else:
y_proba = None
results = scorer.run_on_pred(y_true, y_pred, y_proba)
return scorer.validate_scorer_multilabel_output(results) | Infer using DeepcheckScorer on NLP TextData using an NLP context _DummyModel. |
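The reshaping step that infer_on_text_data applies for text classification can be sketched stand-alone (invented classes and predictions; scikit-learn >= 1.2 assumed, matching the sparse_output branch of the version check above):
import numpy as np
from sklearn.preprocessing import OneHotEncoder
classes = ['anger', 'happiness', 'sadness']
y_pred = np.array(['anger', 'sadness', 'anger'])
# Single-label predictions become the (n_samples, n_classes) binary matrix
# that the scorer's run_on_pred expects in multi-label format.
ohe = OneHotEncoder(handle_unknown='ignore', sparse_output=False)
ohe.fit(np.array(classes).reshape(-1, 1))
print(ohe.transform(y_pred.reshape(-1, 1)).astype(int))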
459 | import pathlib
import typing as t
import warnings
import numpy as np
import pandas as pd
from deepchecks.nlp import TextData
from deepchecks.utils.builtin_datasets_utils import read_and_save_data
def load_all_data() -> t.Dict[str, t.Dict[str, t.Any]]:
"""Load a dict of all the text data, labels and predictions. One function because it's very lightweight."""
return read_and_save_data(ASSETS_DIR, 'scierc_data_dict.json', _DATA_JSON_URL, file_type='json')
The provided code snippet includes necessary dependencies for implementing the `load_precalculated_predictions` function. Write a Python function `def load_precalculated_predictions() -> t.Tuple[t.List[str], t.List[str]]` to solve the following problem:
Load and return a precalculated predictions for the dataset. Returns ------- predictions : Tuple[List[str], List[str]] The IOB predictions of the tokens in the train and test datasets.
Here is the function:
def load_precalculated_predictions() -> t.Tuple[t.List[str], t.List[str]]:
"""Load and return a precalculated predictions for the dataset.
Returns
-------
predictions : Tuple[List[str], List[str]]
The IOB predictions of the tokens in the train and test datasets.
"""
data_dict = load_all_data()
return data_dict['train']['pred'], data_dict['test']['pred'] | Load and return a precalculated predictions for the dataset. Returns ------- predictions : Tuple[List[str], List[str]] The IOB predictions of the tokens in the train and test datasets. |
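Illustrative usage, assuming the loader above is in scope and the figshare assets are reachable:
train_preds, test_preds = load_precalculated_predictions()
# Each element is an IOB tag sequence aligned with one tokenized abstract.
print(len(train_preds), len(test_preds))
print(train_preds[0][:10])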
460 | import pathlib
import typing as t
import warnings
import numpy as np
import pandas as pd
from deepchecks.nlp import TextData
from deepchecks.utils.builtin_datasets_utils import read_and_save_data
def load_all_data() -> t.Dict[str, t.Dict[str, t.Any]]:
"""Load a dict of all the text data, labels and predictions. One function because it's very lightweight."""
return read_and_save_data(ASSETS_DIR, 'scierc_data_dict.json', _DATA_JSON_URL, file_type='json')
def load_embeddings() -> t.Tuple[np.array, np.array]:
"""Load and return the embeddings of the SCIERC dataset calculated by OpenAI.
Returns
-------
embeddings : np.Tuple[np.array, np.array]
Embeddings for the SCIERC dataset.
"""
train_embeddings = read_and_save_data(ASSETS_DIR, 'train_embeddings.npy', _TRAIN_EMBEDDINGS_URL,
file_type='npy', to_numpy=True)
test_embeddings = read_and_save_data(ASSETS_DIR, 'test_embeddings.npy', _TEST_EMBEDDINGS_URL,
file_type='npy', to_numpy=True)
return train_embeddings, test_embeddings
def load_properties() -> t.Tuple[pd.DataFrame, pd.DataFrame]:
"""Load and return the properties of the SCIERC dataset.
Returns
-------
properties : Tuple[pd.DataFrame, pd.DataFrame]
Properties for the SCIERC dataset.
"""
train_properties = read_and_save_data(ASSETS_DIR, 'train_properties.csv', _TRAIN_PROP, to_numpy=False,
include_index=False)
test_properties = read_and_save_data(ASSETS_DIR, 'test_properties.csv', _TEST_PROP, to_numpy=False,
include_index=False)
return train_properties, test_properties
The provided code snippet includes necessary dependencies for implementing the `load_data` function. Write a Python function `def load_data(data_format: str = 'TextData', include_properties: bool = True, include_embeddings: bool = False) -> t.Tuple[t.Union[TextData, pd.DataFrame], t.Union[TextData, pd.DataFrame]]` to solve the following problem:
Load and returns the SCIERC Abstract NER dataset (token classification). Parameters ---------- data_format : str, default: 'TextData' Represent the format of the returned value. Can be 'TextData'|'Dict' 'TextData' will return the data as a TextData object 'Dict' will return the data as a dict of tokenized texts and IOB NER labels include_properties : bool, default: True If True, the returned data will include properties of the comments. Incompatible with data_format='DataFrame' include_embeddings : bool, default: False If True, the returned data will include embeddings of the comments. Incompatible with data_format='DataFrame' Returns ------- train, test : Tuple[Union[TextData, Dict] Tuple of two objects represents the dataset split to train and test sets.
Here is the function:
def load_data(data_format: str = 'TextData', include_properties: bool = True, include_embeddings: bool = False) -> \
t.Tuple[t.Union[TextData, pd.DataFrame], t.Union[TextData, pd.DataFrame]]:
"""Load and returns the SCIERC Abstract NER dataset (token classification).
Parameters
----------
data_format : str, default: 'TextData'
Represent the format of the returned value. Can be 'TextData'|'Dict'
'TextData' will return the data as a TextData object
'Dict' will return the data as a dict of tokenized texts and IOB NER labels
include_properties : bool, default: True
If True, the returned data will include properties of the comments. Incompatible with data_format='DataFrame'
include_embeddings : bool, default: False
If True, the returned data will include embeddings of the comments. Incompatible with data_format='DataFrame'
Returns
-------
train, test : Tuple[Union[TextData, Dict]
Tuple of two objects represents the dataset split to train and test sets.
"""
if data_format.lower() not in ['textdata', 'dict']:
raise ValueError('data_format must be either "TextData" or "Dict"')
elif data_format.lower() == 'dict':
if include_properties or include_embeddings:
warnings.warn('include_properties and include_embeddings are incompatible with data_format="Dict". '
'loading only original text data',
UserWarning)
include_properties, include_embeddings = False, False
data = load_all_data()
train, test = data['train'], data['test']
# Delete the pred field of the dictionary
del train['pred']
del test['pred']
if data_format.lower() != 'textdata':
return train, test
if include_properties:
train_properties, test_properties = load_properties()
else:
train_properties, test_properties = None, None
if include_embeddings:
train_embeddings, test_embeddings = load_embeddings()
else:
train_embeddings, test_embeddings = None, None
train_ds = TextData(tokenized_text=train['text'], label=train['text'], task_type='token_classification',
properties=train_properties, embeddings=train_embeddings)
test_ds = TextData(tokenized_text=test['text'], label=test['text'], task_type='token_classification',
properties=test_properties, embeddings=test_embeddings)
return train_ds, test_ds | Load and returns the SCIERC Abstract NER dataset (token classification). Parameters ---------- data_format : str, default: 'TextData' Represent the format of the returned value. Can be 'TextData'|'Dict' 'TextData' will return the data as a TextData object 'Dict' will return the data as a dict of tokenized texts and IOB NER labels include_properties : bool, default: True If True, the returned data will include properties of the comments. Incompatible with data_format='DataFrame' include_embeddings : bool, default: False If True, the returned data will include embeddings of the comments. Incompatible with data_format='DataFrame' Returns ------- train, test : Tuple[Union[TextData, Dict] Tuple of two objects represents the dataset split to train and test sets. |
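Illustrative usage of the two supported formats, assuming network access (properties and embeddings are skipped here to keep the download small):
# TextData objects without the optional properties/embeddings downloads.
train_ds, test_ds = load_data(include_properties=False)
# Plain dict format: tokenized texts and IOB labels, with predictions stripped.
train_dict, test_dict = load_data(data_format='Dict', include_properties=False)
print(list(train_dict.keys()))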
461 | import pathlib
import typing as t
import warnings
import numpy as np
import pandas as pd
from deepchecks.nlp import TextData
from deepchecks.utils.builtin_datasets_utils import read_and_save_data
_PREDICTIONS_URL = 'https://ndownloader.figshare.com/files/39264461'
ASSETS_DIR = pathlib.Path(__file__).absolute().parent.parent / 'assets' / 'tweet_emotion'
_LABEL_MAP = {0: 'anger', 1: 'happiness', 2: 'optimism', 3: 'sadness'}
def _get_train_test_indexes() -> t.Tuple[np.array, np.array]:
"""Get the indexes of the train and test sets."""
if (ASSETS_DIR / 'tweet_emotion_data.csv').exists():
dataset = pd.read_csv(ASSETS_DIR / 'tweet_emotion_data.csv', index_col=0,
usecols=['Unnamed: 0', 'train_test_split'])
else:
dataset = pd.read_csv(_FULL_DATA_URL, index_col=0, usecols=['Unnamed: 0', 'train_test_split'])
train_indexes = dataset[dataset['train_test_split'] == 'Train'].index
test_indexes = dataset[dataset['train_test_split'] == 'Test'].index
return train_indexes, test_indexes
def read_and_save_data(assets_dir, file_name, url_to_file, file_type='csv', to_numpy=False, include_index=True):
"""If the file exist reads it from the assets' directory, otherwise reads it from the url and saves it."""
os.makedirs(assets_dir, exist_ok=True)
if (assets_dir / file_name).exists():
if file_type == 'csv':
data = pd.read_csv(assets_dir / file_name, index_col=0 if include_index else None)
elif file_type == 'npy':
data = np.load(assets_dir / file_name)
elif file_type == 'json':
with open(assets_dir / file_name, 'r', encoding='utf-8') as f:
data = json.load(f)
else:
            raise ValueError('file_type must be either "csv", "npy" or "json"')
else:
if file_type == 'csv':
data = pd.read_csv(url_to_file, index_col=0 if include_index else None)
data.to_csv(assets_dir / file_name)
elif file_type == 'npy':
data = np.load(BytesIO(requests.get(url_to_file).content))
np.save(assets_dir / file_name, data)
elif file_type == 'json':
data = json.loads(requests.get(url_to_file).content)
with open(assets_dir / file_name, 'w', encoding='utf-8') as f:
json.dump(data, f)
else:
            raise ValueError('file_type must be either "csv", "npy" or "json"')
if to_numpy and (file_type in {'csv', 'npy'}):
if isinstance(data, pd.DataFrame):
data = data.to_numpy()
elif not isinstance(data, np.ndarray):
raise ValueError(f'Unknown data type - {type(data)}. Must be either pandas.DataFrame or numpy.ndarray')
elif to_numpy:
raise ValueError(f'Cannot convert {file_type} to numpy array')
return data
The provided code snippet includes necessary dependencies for implementing the `load_precalculated_predictions` function. Write a Python function `def load_precalculated_predictions(pred_format: str = 'predictions', as_train_test: bool = True) -> t.Union[np.array, t.Tuple[np.array, np.array]]` to solve the following problem:
Load and return a precalculated predictions for the dataset. Parameters ---------- pred_format : str, default: 'predictions' Represent the format of the returned value. Can be 'predictions' or 'probabilities'. 'predictions' will return the predicted class for each sample. 'probabilities' will return the predicted probabilities for each sample. as_train_test : bool, default: True If True, the returned data is split into train and test exactly like the toy model was trained. The first return value is the train data and the second is the test data. Otherwise, returns a single object. Returns ------- predictions : np.ndarray The prediction of the data elements in the dataset.
Here is the function:
def load_precalculated_predictions(pred_format: str = 'predictions', as_train_test: bool = True) -> \
t.Union[np.array, t.Tuple[np.array, np.array]]:
"""Load and return a precalculated predictions for the dataset.
Parameters
----------
pred_format : str, default: 'predictions'
Represent the format of the returned value. Can be 'predictions' or 'probabilities'.
'predictions' will return the predicted class for each sample.
'probabilities' will return the predicted probabilities for each sample.
as_train_test : bool, default: True
If True, the returned data is split into train and test exactly like the toy model
was trained. The first return value is the train data and the second is the test data.
Otherwise, returns a single object.
Returns
-------
predictions : np.ndarray
The prediction of the data elements in the dataset.
"""
all_preds = read_and_save_data(ASSETS_DIR, 'tweet_emotion_probabilities.csv', _PREDICTIONS_URL, to_numpy=True)
if pred_format == 'predictions':
all_preds = np.array([_LABEL_MAP[x] for x in np.argmax(all_preds, axis=1)])
elif pred_format != 'probabilities':
raise ValueError('pred_format must be either "predictions" or "probabilities"')
if as_train_test:
train_indexes, test_indexes = _get_train_test_indexes()
return all_preds[train_indexes], all_preds[test_indexes]
else:
return all_preds | Load and return a precalculated predictions for the dataset. Parameters ---------- pred_format : str, default: 'predictions' Represent the format of the returned value. Can be 'predictions' or 'probabilities'. 'predictions' will return the predicted class for each sample. 'probabilities' will return the predicted probabilities for each sample. as_train_test : bool, default: True If True, the returned data is split into train and test exactly like the toy model was trained. The first return value is the train data and the second is the test data. Otherwise, returns a single object. Returns ------- predictions : np.ndarray The prediction of the data elements in the dataset. |
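Illustrative usage, assuming the tweet-emotion assets can be fetched:
# Class-name predictions, split to match the toy model's train/test split.
train_preds, test_preds = load_precalculated_predictions('predictions')
# Raw class probabilities over the four emotions, unsplit.
all_probas = load_precalculated_predictions('probabilities', as_train_test=False)
print(train_preds[:5], all_probas.shape)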
462 | import pathlib
import typing as t
import warnings
import numpy as np
import pandas as pd
from deepchecks.nlp import TextData
from deepchecks.utils.builtin_datasets_utils import read_and_save_data
def load_data(data_format: str = 'TextData', as_train_test: bool = True,
include_properties: bool = True, include_embeddings: bool = False) -> \
t.Union[t.Tuple, t.Union[TextData, pd.DataFrame]]:
"""Load and returns the Tweet Emotion dataset (classification).
Parameters
----------
data_format : str, default: 'TextData'
Represent the format of the returned value. Can be 'TextData'|'DataFrame'
'TextData' will return the data as a TextData object
'Dataframe' will return the data as a pandas DataFrame object
as_train_test : bool, default: True
If True, the returned data is split into train and test exactly like the toy model
was trained. The first return value is the train data and the second is the test data.
In order to get this model, call the load_fitted_model() function.
Otherwise, returns a single object.
include_properties : bool, default: True
If True, the returned data will include the properties of the tweets. Incompatible with data_format='DataFrame'
    include_embeddings : bool, default: False
If True, the returned data will include the embeddings of the tweets. Incompatible with data_format='DataFrame'
Returns
-------
dataset : Union[TextData, pd.DataFrame]
the data object, corresponding to the data_format attribute.
train, test : Tuple[Union[TextData, pd.DataFrame],Union[TextData, pd.DataFrame]
tuple if as_train_test = True. Tuple of two objects represents the dataset split to train and test sets.
"""
if data_format.lower() not in ['textdata', 'dataframe']:
raise ValueError('data_format must be either "TextData" or "Dataframe"')
elif data_format.lower() == 'dataframe':
if include_properties or include_embeddings:
warnings.warn('include_properties and include_embeddings are incompatible with data_format="Dataframe". '
'loading only original text data.',
UserWarning)
data = read_and_save_data(ASSETS_DIR, 'tweet_emotion_data.csv', _FULL_DATA_URL, to_numpy=False)
if not as_train_test:
data.drop(columns=['train_test_split'], inplace=True)
if data_format.lower() != 'textdata':
return data
metadata = data.drop(columns=[_target, 'text'])
properties = load_properties(as_train_test=False) if include_properties else None
embeddings = load_embeddings(as_train_test=False) if include_embeddings else None
dataset = TextData(data.text, label=data[_target], task_type='text_classification',
metadata=metadata, embeddings=embeddings, properties=properties,
categorical_metadata=_CAT_METADATA)
return dataset
else:
# train has more sport and Customer Complains but less Terror and Optimism
train = data[data['train_test_split'] == 'Train'].drop(columns=['train_test_split'])
test = data[data['train_test_split'] == 'Test'].drop(columns=['train_test_split'])
if data_format.lower() != 'textdata':
return train, test
train_metadata, test_metadata = train.drop(columns=[_target, 'text']), test.drop(columns=[_target, 'text'])
train_properties, test_properties = load_properties(as_train_test=True) if include_properties else (None, None)
train_embeddings, test_embeddings = load_embeddings(as_train_test=True) if include_embeddings else (None, None)
train_ds = TextData(train.text, label=train[_target], task_type='text_classification',
metadata=train_metadata, embeddings=train_embeddings, properties=train_properties,
categorical_metadata=_CAT_METADATA)
test_ds = TextData(test.text, label=test[_target], task_type='text_classification',
metadata=test_metadata, embeddings=test_embeddings, properties=test_properties,
categorical_metadata=_CAT_METADATA)
return train_ds, test_ds
The provided code snippet includes necessary dependencies for implementing the `load_under_annotated_data` function. Write a Python function `def load_under_annotated_data()` to solve the following problem:
Load and return the test data, modified to have under annotated segment.
Here is the function:
def load_under_annotated_data():
"""Load and return the test data, modified to have under annotated segment."""
_, test = load_data()
test_copy = test.copy()
# randomly remove 5% of the labels
np.random.seed(42)
idx_to_fillna = np.random.choice(range(len(test)), int(len(test) * 0.05), replace=False)
test_copy._label = test_copy._label.astype(dtype=object) # pylint: disable=protected-access
test_copy._label[idx_to_fillna] = None # pylint: disable=protected-access
# randomly remove 40% of the under annotated segments
np.random.seed(42)
under_annotated_segment_idx = test_copy.properties[
(test_copy.properties.Fluency < 0.4) & (test_copy.properties.Formality < 0.2)].index
idx_to_fillna = np.random.choice(under_annotated_segment_idx, int(len(under_annotated_segment_idx) * 0.4),
replace=False)
test_copy._label[idx_to_fillna] = None # pylint: disable=protected-access
return test_copy | Load and return the test data, modified to have under annotated segment. |
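A quick check of the injected annotation gaps, assuming the loader above and the installed TextData API:
import pandas as pd
test_ds = load_under_annotated_data()
# Roughly 5% of labels, plus part of the low-Fluency/low-Formality segment, were dropped.
n_missing = sum(bool(pd.isnull(label)) for label in test_ds.label)
print(n_missing, 'of', test_ds.n_samples, 'labels removed')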
463 | import pathlib
import typing as t
import warnings
import numpy as np
import pandas as pd
from deepchecks.nlp import TextData
from deepchecks.utils.builtin_datasets_utils import read_and_save_data
_SHORT_PROBAS_URL = 'https://figshare.com/ndownloader/files/40578866'
ASSETS_DIR = pathlib.Path(__file__).absolute().parent.parent / 'assets' / 'just_dance_comment_analysis'
def _get_train_test_indexes(use_full_size: bool = False) -> t.Tuple[np.array, np.array]:
"""Get the indexes of the train and test sets."""
if use_full_size:
dataset = pd.read_csv(ASSETS_DIR / 'just_dance_data.csv', usecols=[_TIME_COL])
else:
dataset = pd.read_csv(ASSETS_DIR / 'just_dance_shorted_data.csv', usecols=[_TIME_COL])
train_indexes = dataset[dataset[_TIME_COL] < _DATE_TO_SPLIT_BY].index
test_indexes = dataset[dataset[_TIME_COL] >= _DATE_TO_SPLIT_BY].index
return train_indexes, test_indexes
def read_and_save_data(assets_dir, file_name, url_to_file, file_type='csv', to_numpy=False, include_index=True):
"""If the file exist reads it from the assets' directory, otherwise reads it from the url and saves it."""
os.makedirs(assets_dir, exist_ok=True)
if (assets_dir / file_name).exists():
if file_type == 'csv':
data = pd.read_csv(assets_dir / file_name, index_col=0 if include_index else None)
elif file_type == 'npy':
data = np.load(assets_dir / file_name)
elif file_type == 'json':
with open(assets_dir / file_name, 'r', encoding='utf-8') as f:
data = json.load(f)
else:
            raise ValueError('file_type must be either "csv", "npy" or "json"')
else:
if file_type == 'csv':
data = pd.read_csv(url_to_file, index_col=0 if include_index else None)
data.to_csv(assets_dir / file_name)
elif file_type == 'npy':
data = np.load(BytesIO(requests.get(url_to_file).content))
np.save(assets_dir / file_name, data)
elif file_type == 'json':
data = json.loads(requests.get(url_to_file).content)
with open(assets_dir / file_name, 'w', encoding='utf-8') as f:
json.dump(data, f)
else:
            raise ValueError('file_type must be either "csv", "npy" or "json"')
if to_numpy and (file_type in {'csv', 'npy'}):
if isinstance(data, pd.DataFrame):
data = data.to_numpy()
elif not isinstance(data, np.ndarray):
raise ValueError(f'Unknown data type - {type(data)}. Must be either pandas.DataFrame or numpy.ndarray')
elif to_numpy:
raise ValueError(f'Cannot convert {file_type} to numpy array')
return data
The provided code snippet includes necessary dependencies for implementing the `load_precalculated_predictions` function. Write a Python function `def load_precalculated_predictions(pred_format: str = 'predictions', as_train_test: bool = True, use_full_size: bool = False) -> t.Union[np.array, t.Tuple[np.array, np.array]]` to solve the following problem:
Load and return a precalculated predictions for the dataset. Parameters ---------- pred_format : str, default: 'predictions' Represent the format of the returned value. Can be 'predictions' or 'probabilities'. 'predictions' will return the predicted class for each sample. 'probabilities' will return the predicted probabilities for each sample. as_train_test : bool, default: True If True, the returned data is split into train and test exactly like the toy model was trained. The first return value is the train data and the second is the test data. Otherwise, returns a single object. use_full_size : bool, default: False If True, the returned data will be the full dataset, otherwise returns a subset of the data. Returns ------- predictions : np.ndarray The prediction of the data elements in the dataset.
Here is the function:
def load_precalculated_predictions(pred_format: str = 'predictions', as_train_test: bool = True,
use_full_size: bool = False) -> \
t.Union[np.array, t.Tuple[np.array, np.array]]:
"""Load and return a precalculated predictions for the dataset.
Parameters
----------
pred_format : str, default: 'predictions'
Represent the format of the returned value. Can be 'predictions' or 'probabilities'.
'predictions' will return the predicted class for each sample.
'probabilities' will return the predicted probabilities for each sample.
as_train_test : bool, default: True
If True, the returned data is split into train and test exactly like the toy model
was trained. The first return value is the train data and the second is the test data.
Otherwise, returns a single object.
use_full_size : bool, default: False
If True, the returned data will be the full dataset, otherwise returns a subset of the data.
Returns
-------
predictions : np.ndarray
The prediction of the data elements in the dataset.
"""
if use_full_size:
raise NotImplementedError('Predictions for the full dataset are not yet available.')
all_preds = read_and_save_data(ASSETS_DIR, 'just_dance_probabilities.csv', _SHORT_PROBAS_URL, to_numpy=True,
file_type='npy')
if pred_format == 'predictions':
all_preds = (np.array(all_preds) > 0.5)
all_preds = all_preds.astype(int)
elif pred_format != 'probabilities':
raise ValueError('pred_format must be either "predictions" or "probabilities"')
if as_train_test:
train_indexes, test_indexes = _get_train_test_indexes()
return all_preds[train_indexes], all_preds[test_indexes]
else:
return all_preds | Load and return a precalculated predictions for the dataset. Parameters ---------- pred_format : str, default: 'predictions' Represent the format of the returned value. Can be 'predictions' or 'probabilities'. 'predictions' will return the predicted class for each sample. 'probabilities' will return the predicted probabilities for each sample. as_train_test : bool, default: True If True, the returned data is split into train and test exactly like the toy model was trained. The first return value is the train data and the second is the test data. Otherwise, returns a single object. use_full_size : bool, default: False If True, the returned data will be the full dataset, otherwise returns a subset of the data. Returns ------- predictions : np.ndarray The prediction of the data elements in the dataset. |
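A minimal usage sketch for the function above; it assumes the figshare asset is reachable and the helpers defined in this snippet are in scope:
# Predicted labels, split exactly like the toy model's train/test split
train_preds, test_preds = load_precalculated_predictions(pred_format='predictions')
# Raw probabilities for the whole (short) dataset as a single array
all_probas = load_precalculated_predictions(pred_format='probabilities', as_train_test=False)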
464 | import json
import os
import pathlib
import typing as t
import warnings
from io import BytesIO
import numpy as np
import pandas as pd
import requests
from deepchecks.nlp import TextData
from deepchecks.utils.builtin_datasets_utils import read_and_save_data
_FULL_DATA_URL = 'https://figshare.com/ndownloader/files/40564895'
_SHORT_DATA_URL = 'https://figshare.com/ndownloader/files/40576232'
ASSETS_DIR = pathlib.Path(__file__).absolute().parent.parent / 'assets' / 'just_dance_comment_analysis'
_METADATA_COLS = ['likes', 'dateComment']
_CAT_METADATA = []
_TEXT_COL = 'originalText'
_TIME_COL = 'dateComment'
def load_embeddings(as_train_test: bool = True, use_full_size: bool = False) -> \
t.Union[np.array, t.Tuple[np.array, np.array]]:
"""Load and return the embeddings of the just dance dataset calculated by OpenAI.
Parameters
----------
as_train_test : bool, default: True
If True, the returned data is split into train and test exactly like the toy model
was trained. The first return value is the train data and the second is the test data.
Otherwise, returns a single object.
use_full_size : bool, default: False
If True, the returned data will be the full dataset, otherwise returns a subset of the data.
Returns
-------
embeddings : np.ndarray
Embeddings for the just dance dataset.
"""
if use_full_size:
raise NotImplementedError('Embeddings for the full dataset are not yet available.')
all_embeddings = read_and_save_data(ASSETS_DIR, 'just_dance_embeddings.npy', _SHORT_EMBEDDINGS_URL,
file_type='npy', to_numpy=True)
if as_train_test:
train_indexes, test_indexes = _get_train_test_indexes(use_full_size)
return all_embeddings[train_indexes], all_embeddings[test_indexes]
else:
return all_embeddings
def load_properties(as_train_test: bool = True, use_full_size: bool = False) -> \
t.Union[pd.DataFrame, t.Tuple[pd.DataFrame, pd.DataFrame]]:
"""Load and return the properties of the just_dance dataset.
Parameters
----------
as_train_test : bool, default: True
If True, the returned data is split into train and test exactly like the toy model
was trained. The first return value is the train data and the second is the test data.
In order to get this model, call the load_fitted_model() function.
Otherwise, returns a single object.
use_full_size : bool, default: False
If True, the returned data will be the full dataset, otherwise returns a subset of the data.
Returns
-------
properties : pd.DataFrame
Properties for the just dance dataset.
"""
if use_full_size:
raise NotImplementedError('Properties for the full dataset are not yet available.')
properties = read_and_save_data(ASSETS_DIR, 'just_dance_properties.csv', _SHORT_PROPERTIES_URL, to_numpy=False)
if as_train_test:
train_indexes, test_indexes = _get_train_test_indexes(use_full_size)
return properties.loc[train_indexes], properties.loc[test_indexes]
else:
return properties
def _get_train_test_indexes(use_full_size: bool = False) -> t.Tuple[np.array, np.array]:
"""Get the indexes of the train and test sets."""
if use_full_size:
dataset = pd.read_csv(ASSETS_DIR / 'just_dance_data.csv', usecols=[_TIME_COL])
else:
dataset = pd.read_csv(ASSETS_DIR / 'just_dance_shorted_data.csv', usecols=[_TIME_COL])
train_indexes = dataset[dataset[_TIME_COL] < _DATE_TO_SPLIT_BY].index
test_indexes = dataset[dataset[_TIME_COL] >= _DATE_TO_SPLIT_BY].index
return train_indexes, test_indexes
def read_and_save_data(assets_dir, file_name, url_to_file, file_type='csv', to_numpy=False, include_index=True):
    """If the file exists, read it from the assets' directory; otherwise read it from the url and save it."""
os.makedirs(assets_dir, exist_ok=True)
if (assets_dir / file_name).exists():
if file_type == 'csv':
data = pd.read_csv(assets_dir / file_name, index_col=0 if include_index else None)
elif file_type == 'npy':
data = np.load(assets_dir / file_name)
elif file_type == 'json':
with open(assets_dir / file_name, 'r', encoding='utf-8') as f:
data = json.load(f)
else:
            raise ValueError('file_type must be either "csv", "npy" or "json"')
else:
if file_type == 'csv':
data = pd.read_csv(url_to_file, index_col=0 if include_index else None)
data.to_csv(assets_dir / file_name)
elif file_type == 'npy':
data = np.load(BytesIO(requests.get(url_to_file).content))
np.save(assets_dir / file_name, data)
elif file_type == 'json':
data = json.loads(requests.get(url_to_file).content)
with open(assets_dir / file_name, 'w', encoding='utf-8') as f:
json.dump(data, f)
else:
            raise ValueError('file_type must be either "csv", "npy" or "json"')
if to_numpy and (file_type in {'csv', 'npy'}):
if isinstance(data, pd.DataFrame):
data = data.to_numpy()
elif not isinstance(data, np.ndarray):
raise ValueError(f'Unknown data type - {type(data)}. Must be either pandas.DataFrame or numpy.ndarray')
elif to_numpy:
raise ValueError(f'Cannot convert {file_type} to numpy array')
return data
The provided code snippet includes necessary dependencies for implementing the `load_data` function. Write a Python function `def load_data(data_format: str = 'TextData', as_train_test: bool = True, use_full_size: bool = False, include_properties: bool = True, include_embeddings: bool = False) -> t.Union[t.Tuple, t.Union[TextData, pd.DataFrame]]` to solve the following problem:
Load and returns the Just Dance Comment Analysis dataset (multi-label classification). Parameters ---------- data_format : str, default: 'TextData' Represent the format of the returned value. Can be 'TextData'|'DataFrame' 'TextData' will return the data as a TextData object 'Dataframe' will return the data as a pandas DataFrame object as_train_test : bool, default: True If True, the returned data is split into train and test exactly like the toy model was trained. The first return value is the train data and the second is the test data. In order to get this model, call the load_fitted_model() function. Otherwise, returns a single object. use_full_size : bool, default: False If True, the returned data will be the full dataset, otherwise returns a subset of the data. include_properties : bool, default: True If True, the returned data will include properties of the comments. Incompatible with data_format='DataFrame' include_embeddings : bool, default: False If True, the returned data will include embeddings of the comments. Incompatible with data_format='DataFrame' Returns ------- dataset : Union[TextData, pd.DataFrame] the data object, corresponding to the data_format attribute. train, test : Tuple[Union[TextData, pd.DataFrame],Union[TextData, pd.DataFrame] tuple if as_train_test = True. Tuple of two objects represents the dataset split to train and test sets.
Here is the function:
def load_data(data_format: str = 'TextData', as_train_test: bool = True, use_full_size: bool = False,
include_properties: bool = True, include_embeddings: bool = False) -> \
t.Union[t.Tuple, t.Union[TextData, pd.DataFrame]]:
"""Load and returns the Just Dance Comment Analysis dataset (multi-label classification).
Parameters
----------
data_format : str, default: 'TextData'
Represent the format of the returned value. Can be 'TextData'|'DataFrame'
'TextData' will return the data as a TextData object
'Dataframe' will return the data as a pandas DataFrame object
as_train_test : bool, default: True
If True, the returned data is split into train and test exactly like the toy model
was trained. The first return value is the train data and the second is the test data.
In order to get this model, call the load_fitted_model() function.
Otherwise, returns a single object.
use_full_size : bool, default: False
If True, the returned data will be the full dataset, otherwise returns a subset of the data.
include_properties : bool, default: True
If True, the returned data will include properties of the comments. Incompatible with data_format='DataFrame'
include_embeddings : bool, default: False
If True, the returned data will include embeddings of the comments. Incompatible with data_format='DataFrame'
Returns
-------
dataset : Union[TextData, pd.DataFrame]
the data object, corresponding to the data_format attribute.
train, test : Tuple[Union[TextData, pd.DataFrame],Union[TextData, pd.DataFrame]
tuple if as_train_test = True. Tuple of two objects represents the dataset split to train and test sets.
"""
if data_format.lower() not in ['textdata', 'dataframe']:
raise ValueError('data_format must be either "TextData" or "Dataframe"')
elif data_format.lower() == 'dataframe':
if include_properties or include_embeddings:
warnings.warn('include_properties and include_embeddings are incompatible with data_format="Dataframe". '
'loading only original text data.',
UserWarning)
include_properties, include_embeddings = False, False
if use_full_size:
data = read_and_save_data(ASSETS_DIR, 'just_dance_data.csv', _FULL_DATA_URL, to_numpy=False,
include_index=False)
else:
data = read_and_save_data(ASSETS_DIR, 'just_dance_shorted_data.csv', _SHORT_DATA_URL, to_numpy=False)
data[_TIME_COL] = pd.to_datetime(data[_TIME_COL])
properties = load_properties(as_train_test=False, use_full_size=use_full_size) if include_properties else None
embeddings = load_embeddings(as_train_test=False, use_full_size=use_full_size) if include_embeddings else None
if not as_train_test:
if data_format.lower() != 'textdata':
return data
label = data.drop(columns=[_TEXT_COL] + _METADATA_COLS).to_numpy().astype(int)
dataset = TextData(data[_TEXT_COL], label=label, task_type='text_classification',
metadata=data[_METADATA_COLS], categorical_metadata=_CAT_METADATA,
properties=properties, embeddings=embeddings)
return dataset
else:
train_indexes, test_indexes = _get_train_test_indexes(use_full_size)
train, test = data.loc[train_indexes], data.loc[test_indexes]
if data_format.lower() != 'textdata':
return train, test
train_metadata, test_metadata = train[_METADATA_COLS], test[_METADATA_COLS]
label_train = train.drop(columns=[_TEXT_COL] + _METADATA_COLS).to_numpy().astype(int)
label_test = test.drop(columns=[_TEXT_COL] + _METADATA_COLS).to_numpy().astype(int)
if include_properties:
train_properties, test_properties = properties.loc[train.index], properties.loc[test.index]
else:
train_properties, test_properties = None, None
if include_embeddings:
train_embeddings = embeddings[train.index] # pylint: disable=unsubscriptable-object
test_embeddings = embeddings[test.index] # pylint: disable=unsubscriptable-object
else:
train_embeddings, test_embeddings = None, None
train_ds = TextData(train[_TEXT_COL], label=label_train, task_type='text_classification',
metadata=train_metadata, categorical_metadata=_CAT_METADATA,
properties=train_properties, embeddings=train_embeddings)
test_ds = TextData(test[_TEXT_COL], label=label_test, task_type='text_classification',
metadata=test_metadata, categorical_metadata=_CAT_METADATA,
properties=test_properties, embeddings=test_embeddings)
return train_ds, test_ds | Load and returns the Just Dance Comment Analysis dataset (multi-label classification). Parameters ---------- data_format : str, default: 'TextData' Represent the format of the returned value. Can be 'TextData'|'DataFrame' 'TextData' will return the data as a TextData object 'Dataframe' will return the data as a pandas DataFrame object as_train_test : bool, default: True If True, the returned data is split into train and test exactly like the toy model was trained. The first return value is the train data and the second is the test data. In order to get this model, call the load_fitted_model() function. Otherwise, returns a single object. use_full_size : bool, default: False If True, the returned data will be the full dataset, otherwise returns a subset of the data. include_properties : bool, default: True If True, the returned data will include properties of the comments. Incompatible with data_format='DataFrame' include_embeddings : bool, default: False If True, the returned data will include embeddings of the comments. Incompatible with data_format='DataFrame' Returns ------- dataset : Union[TextData, pd.DataFrame] the data object, corresponding to the data_format attribute. train, test : Tuple[Union[TextData, pd.DataFrame],Union[TextData, pd.DataFrame] tuple if as_train_test = True. Tuple of two objects represents the dataset split to train and test sets. |
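A short usage sketch, assuming the dataset CSVs can be downloaded and the properties/embeddings loaders referenced above are available:
# TextData objects with metadata and properties attached, split by comment date
train_ds, test_ds = load_data(data_format='TextData', include_properties=True)
# Or the raw comments as a single DataFrame, without properties or embeddings
full_df = load_data(data_format='DataFrame', as_train_test=False, include_properties=False)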
465 | import contextlib
import logging
import pathlib
import typing as t
import warnings
from numbers import Number
import numpy as np
import pandas as pd
from deepchecks.core.errors import DeepchecksNotSupportedError, DeepchecksValueError
from deepchecks.nlp.input_validations import (ColumnTypes, validate_length_and_calculate_column_types,
validate_length_and_type_numpy_array, validate_modify_label,
validate_raw_text, validate_tokenized_text)
from deepchecks.nlp.task_type import TaskType, TTextLabel
from deepchecks.nlp.utils.text import break_to_lines_and_trim
from deepchecks.nlp.utils.text_data_plot import text_data_describe_plot
from deepchecks.nlp.utils.text_embeddings import calculate_builtin_embeddings
from deepchecks.nlp.utils.text_properties import calculate_builtin_properties, get_builtin_properties_types
from deepchecks.utils.logger import get_logger
from deepchecks.utils.metrics import is_label_none
from deepchecks.utils.validation import is_sequence_not_str
_logger = logging.getLogger('deepchecks')  # assumed module-level logger returned by get_logger below
def get_logger() -> logging.Logger:
    """Return the deepchecks logger."""
return _logger
The provided code snippet includes necessary dependencies for implementing the `disable_deepchecks_logger` function. Write a Python function `def disable_deepchecks_logger()` to solve the following problem:
Disable deepchecks root logger.
Here is the function:
@contextlib.contextmanager  # the yield-based body is meant to run as a context manager
def disable_deepchecks_logger():
"""Disable deepchecks root logger."""
logger = get_logger()
logger_state = logger.disabled
logger.disabled = True
yield
logger.disabled = logger_state | Disable deepchecks root logger. |
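Because the function yields, it is intended to be used as a context manager; a minimal sketch (the placeholder call is hypothetical):
with disable_deepchecks_logger():
    some_noisy_deepchecks_call()  # hypothetical placeholder; output from the 'deepchecks' logger is suppressed here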
466 | import typing as t
import warnings
from typing import Hashable, List, Optional, Protocol, Union
import numpy as np
import pandas as pd
from deepchecks.core.errors import DeepchecksNotSupportedError, DeepchecksProcessError, DeepchecksValueError
from deepchecks.nlp import TextData
from deepchecks.utils.dataframes import select_from_dataframe
def _warn_n_top_columns(data_type: str, n_top_features: int):
"""Warn if n_top_columns is smaller than the number of segmenting features (metadata or properties)."""
if data_type == 'metadata':
features_name = 'metadata columns'
n_top_columns_parameter = 'n_top_columns'
columns_parameter = 'columns'
else:
features_name = 'properties'
n_top_columns_parameter = 'n_top_properties'
columns_parameter = 'properties'
warnings.warn(
f'Parameter {n_top_columns_parameter} is set to {n_top_features} to avoid long computation time. '
f'This means that the check will run on {n_top_features} {features_name} selected at random. '
f'If you want to run on all {features_name}, set {n_top_columns_parameter} to None. '
f'Alternatively, you can set parameter {columns_parameter} to a list of the specific {features_name} '
f'you want to run on.', UserWarning)
class Hashable(Protocol):
"""Trait for any hashable type that also defines comparison operators."""
def __hash__(self) -> int: # noqa: D105
...
def __le__(self, __value) -> bool: # noqa: D105
...
def __lt__(self, __value) -> bool: # noqa: D105
...
def __ge__(self, __value) -> bool: # noqa: D105
...
def __gt__(self, __value) -> bool: # noqa: D105
...
def __eq__(self, __value) -> bool: # noqa: D105
...
class DeepchecksNotSupportedError(DeepchecksBaseError):
"""Exception class that represents an unsupported action in Deepchecks."""
class DeepchecksProcessError(DeepchecksBaseError):
"""Exception class that represents an issue with a process."""
pass
def select_from_dataframe(
df: pd.DataFrame,
columns: t.Union[Hashable, t.List[Hashable], None] = None,
ignore_columns: t.Union[Hashable, t.List[Hashable], None] = None
) -> pd.DataFrame:
"""Filter DataFrame columns by given params.
Parameters
----------
df : pd.DataFrame
columns : t.Union[Hashable, t.List[Hashable]] , default: None
Column names to keep.
ignore_columns : t.Union[Hashable, t.List[Hashable]] , default: None
Column names to drop.
Returns
-------
pandas.DataFrame
returns horizontally filtered dataframe
Raises
------
DeepchecksValueError
If some columns do not exist within provided dataframe;
If 'columns' and 'ignore_columns' arguments are both not 'None'.
"""
if columns is not None and ignore_columns is not None:
raise DeepchecksValueError(
'Cannot receive both parameters "columns" and "ignore", '
'only one must be used at most'
)
elif columns is not None:
columns = ensure_hashable_or_mutable_sequence(columns)
validate_columns_exist(df, columns)
return t.cast(pd.DataFrame, df[columns])
elif ignore_columns is not None:
ignore_columns = ensure_hashable_or_mutable_sequence(ignore_columns)
validate_columns_exist(df, ignore_columns)
return df.drop(labels=ignore_columns, axis='columns')
else:
return df
The provided code snippet includes necessary dependencies for implementing the `get_relevant_data_table` function. Write a Python function `def get_relevant_data_table(text_data: TextData, data_type: str, columns: Union[Hashable, List[Hashable], None], ignore_columns: Union[Hashable, List[Hashable], None], n_top_features: Optional[int])` to solve the following problem:
Get relevant data table from the database.
Here is the function:
def get_relevant_data_table(text_data: TextData, data_type: str, columns: Union[Hashable, List[Hashable], None],
ignore_columns: Union[Hashable, List[Hashable], None], n_top_features: Optional[int]):
"""Get relevant data table from the database."""
if data_type == 'metadata':
relevant_metadata = text_data.metadata[text_data.categorical_metadata + text_data.numerical_metadata]
features = select_from_dataframe(relevant_metadata, columns, ignore_columns)
cat_features = [col for col in features.columns if col in text_data.categorical_metadata]
elif data_type == 'properties':
features = select_from_dataframe(text_data.properties, columns, ignore_columns)
cat_features = [col for col in features.columns if col in text_data.categorical_properties]
else:
raise DeepchecksProcessError(f'Unknown segment_by value: {data_type}')
if features.shape[1] < 2:
raise DeepchecksNotSupportedError('Check requires to have at least two '
f'{data_type} columns in order to run.')
if n_top_features is not None and n_top_features < features.shape[1]:
_warn_n_top_columns(data_type, n_top_features)
features = features.iloc[:, np.random.choice(features.shape[1], n_top_features, replace=False)]
return features, cat_features | Get relevant data table from the database. |
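A hedged usage sketch; text_data is assumed to be a TextData object whose properties have already been calculated:
features, cat_features = get_relevant_data_table(text_data, data_type='properties',
                                                 columns=None, ignore_columns=None,
                                                 n_top_features=10)
# features is a DataFrame with at most 10 property columns; cat_features lists the categorical ones among them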
467 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
The provided code snippet includes necessary dependencies for implementing the `text_length` function. Write a Python function `def text_length(text: str) -> int` to solve the following problem:
Return text length.
Here is the function:
def text_length(text: str) -> int:
"""Return text length."""
return len(text) | Return text length. |
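A trivial check of the property, following directly from the implementation:
assert text_length('hello world') == 11
assert text_length('') == 0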
468 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
words_cache = {}  # in-memory cache of tokenized words, keyed by hash_text
def _split_to_words_with_cache(text: str) -> List[str]:
"""Tokenize a text into words and cache the result."""
hash_key = hash_text(text)
if hash_key not in words_cache:
words = re.split(r'\W+', normalize_text(text, remove_stops=False, ignore_whitespace=False))
words = [w for w in words if w] # remove empty strings
words_cache[hash_key] = words
return words_cache[hash_key]
The provided code snippet includes necessary dependencies for implementing the `average_word_length` function. Write a Python function `def average_word_length(text: str) -> float` to solve the following problem:
Return average word length.
Here is the function:
def average_word_length(text: str) -> float:
"""Return average word length."""
words = _split_to_words_with_cache(text)
return np.mean([len(word) for word in words]) if words else 0 | Return average word length. |
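Small examples implied by the code above (words are split on non-word characters):
average_word_length('a bb ccc')  # -> 2.0, the mean of word lengths 1, 2 and 3
average_word_length('')          # -> 0, no words found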
469 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
NON_PUNCTUATION_SPECIAL_CHARS = frozenset(set(SPECIAL_CHARACTERS) - set(r"""!"#$%&'()*+,-./:;=?\@""")
- set(string.whitespace))
The provided code snippet includes necessary dependencies for implementing the `percentage_special_characters` function. Write a Python function `def percentage_special_characters(text: str) -> float` to solve the following problem:
Return percentage of special characters (as float between 0 and 1).
Here is the function:
def percentage_special_characters(text: str) -> float:
"""Return percentage of special characters (as float between 0 and 1)."""
return len([c for c in text if c in NON_PUNCTUATION_SPECIAL_CHARS]) / len(text) if len(text) != 0 else 0 | Return percentage of special characters (as float between 0 and 1). |
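A brief illustration; the exact result depends on the SPECIAL_CHARACTERS constant, which is not shown in this snippet:
percentage_special_characters('')          # -> 0, by the explicit empty-text guard
percentage_special_characters('hi ♥ bye')  # -> a fraction in [0, 1] of characters counted as special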
470 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
The provided code snippet includes necessary dependencies for implementing the `percentage_punctuation` function. Write a Python function `def percentage_punctuation(text: str) -> float` to solve the following problem:
Return percentage of punctuation (as float between 0 and 1).
Here is the function:
def percentage_punctuation(text: str) -> float:
"""Return percentage of punctuation (as float between 0 and 1)."""
return len([c for c in text if c in string.punctuation]) / len(text) if len(text) != 0 else 0 | Return percentage of punctuation (as float between 0 and 1). |
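Small examples based on string.punctuation:
percentage_punctuation('Hi!')  # -> 1/3, one punctuation character out of three
percentage_punctuation('')     # -> 0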
471 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
words_cache = {}  # in-memory cache of tokenized words, keyed by hash_text
def _split_to_words_with_cache(text: str) -> List[str]:
"""Tokenize a text into words and cache the result."""
hash_key = hash_text(text)
if hash_key not in words_cache:
words = re.split(r'\W+', normalize_text(text, remove_stops=False, ignore_whitespace=False))
words = [w for w in words if w] # remove empty strings
words_cache[hash_key] = words
return words_cache[hash_key]
The provided code snippet includes necessary dependencies for implementing the `max_word_length` function. Write a Python function `def max_word_length(text: str) -> int` to solve the following problem:
Return max word length.
Here is the function:
def max_word_length(text: str) -> int:
"""Return max word length."""
words = _split_to_words_with_cache(text)
return max(len(w) for w in words) if words else 0 | Return max word length. |
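Examples following directly from the implementation:
max_word_length('a bb ccc')  # -> 3
max_word_length('')          # -> 0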
472 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
def language(
text: str,
lang_certainty_threshold: float = 0.8,
fasttext_model: Optional[Dict[object, Any]] = None
) -> Union[str, None]:
"""Return text language, represented as a string."""
if not text:
return None
# Load the model if it wasn't received as a parameter. This is done to avoid loading the model
# each time the function is called.
if fasttext_model is None:
fasttext_model = get_fasttext_model()
# Predictions are the first prediction (k=1), only if the probability is above the threshold
prediction = fasttext_model.predict(text.replace('\n', ' '), k=1, threshold=lang_certainty_threshold)[0]
# label is empty for detection below threshold:
language_code = prediction[0].replace('__label__', '') if prediction else None
if language_code == 'eng': # both are english but different labels
return 'en'
return language_code
The provided code snippet includes necessary dependencies for implementing the `english_text` function. Write a Python function `def english_text( text: str, lang_certainty_threshold: float = 0.8, fasttext_model: Optional[Dict[object, Any]] = None, language_property_result: Optional[str] = None ) -> Union[bool, None]` to solve the following problem:
Return whether text is in English or not.
Here is the function:
def english_text(
text: str,
lang_certainty_threshold: float = 0.8,
fasttext_model: Optional[Dict[object, Any]] = None,
language_property_result: Optional[str] = None
) -> Union[bool, None]:
"""Return whether text is in English or not."""
if not text:
return None
if language_property_result is None:
language_property_result = language(text, lang_certainty_threshold, fasttext_model)
return language_property_result == 'en' | Return whether text is in English or not. |
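A hedged usage sketch; it assumes the fasttext language-identification model can be fetched via get_fasttext_model():
english_text('This is clearly an English sentence.')  # -> True when detected as 'en' with certainty >= 0.8
english_text('')                                       # -> None, empty text short-circuits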
473 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
textblob_cache = {}
def _sample_for_property(text: str, mode: str = 'words', limit: int = 10000, return_as_list=False,
random_seed: int = 42) -> Union[str, List[str]]:
"""Get a sample a single text sample for a text property.
Parameters
----------
text : str
The text to sample from.
mode : str, default 'words'
The mode to sample in. Can be either 'words' or 'sentences'.
limit : int, default 10000
The maximum number of words or sentences to sample.
"""
np.random.seed(random_seed)
if pd.isna(text):
return None
if mode == 'words':
all_units = _split_to_words_with_cache(text)
if len(all_units) > limit:
all_units = np.random.choice(all_units, size=limit, replace=False)
elif mode == 'sentences':
all_units = _split_to_sentences_with_cache(text)
if len(all_units) > limit:
all_units = np.random.choice(all_units, size=limit, replace=False)
else:
raise DeepchecksValueError(f'Unexpected mode - {mode}')
return ' '.join(all_units) if not return_as_list else list(all_units)
def sentiment(text: str) -> float:
"""Return float representing sentiment."""
hash_key = hash_text(text)
if textblob_cache.get(hash_key) is None:
# TextBlob uses only the words and not the relations between them, so we can sample the text
# to speed up the process:
words = _sample_for_property(text, mode='words')
textblob_cache[hash_key] = textblob.TextBlob(words).sentiment
return textblob_cache.get(hash_key).polarity
def hash_text(text: str) -> int:
"""Hash a text sample."""
assert isinstance(text, str)
return hash(text)
The provided code snippet includes necessary dependencies for implementing the `subjectivity` function. Write a Python function `def subjectivity(text: str) -> float` to solve the following problem:
Return float representing subjectivity.
Here is the function:
def subjectivity(text: str) -> float:
"""Return float representing subjectivity."""
hash_key = hash_text(text)
if textblob_cache.get(hash_key) is None:
# TextBlob uses only the words and not the relations between them, so we can sample the text
# to speed up the process:
words = _sample_for_property(text, mode='words')
textblob_cache[hash_key] = textblob.TextBlob(words).sentiment
return textblob_cache.get(hash_key).subjectivity | Return float representing subjectivity. |
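Illustrative calls, assuming textblob is installed and the sampling helpers referenced above are available; the exact scores come from TextBlob's sentiment model:
subjectivity('I absolutely love this song!')        # opinionated text, score close to 1
subjectivity('The file size is three megabytes.')   # factual text, score close to 0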
474 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
def predict_on_batch(text_batch: Sequence[str], classifier,
output_formatter: Callable[[Dict[str, Any]], float]) -> Sequence[float]:
"""Return prediction of huggingface Pipeline classifier."""
# TODO: make this way smarter, and not just a hack. Count tokens, for a start. Then not just sample sentences.
# If text is longer than classifier context window, sample it:
text_list_to_predict = []
reduced_batch_size = len(text_batch) # Initialize the reduced batch size
retry_count = 0
for text in text_batch:
if len(text) > MAX_CHARS:
sentences = _sample_for_property(text, mode='sentences', limit=10, return_as_list=True)
text_to_use = ''
for sentence in sentences:
if len(text_to_use) + len(sentence) > MAX_CHARS:
break
text_to_use += sentence + '. '
# if even one sentence is too long, use part of the first one:
if len(text_to_use) == 0:
if len(sentences) > 0:
text_to_use = cut_string(sentences[0], MAX_CHARS)
else:
text_to_use = None
text_list_to_predict.append(text_to_use)
else:
text_list_to_predict.append(text)
while reduced_batch_size >= 1:
try:
if reduced_batch_size == 1 or retry_count == 3:
results = []
for text in text_list_to_predict:
if text is None:
results.append(np.nan)
else:
try:
v = classifier(text)[0]
results.append(output_formatter(v))
except Exception: # pylint: disable=broad-except
results.append(np.nan)
return results # Return the results if prediction is successful
v_list = classifier(text_list_to_predict, batch_size=reduced_batch_size)
results = []
for v in v_list:
results.append(output_formatter(v))
return results # Return the results if prediction is successful
except Exception: # pylint: disable=broad-except
reduced_batch_size = max(reduced_batch_size // 2, 1) # Reduce the batch size by half
retry_count += 1
return [np.nan] * len(text_batch) # Prediction failed, return NaN values for the original batch size
TOXICITY_CALIBRATOR = pathlib.Path(__file__).absolute().parent / 'assets' / 'toxicity_calibrator.pkl'
TOXICITY_MODEL_NAME = 'SkolkovoInstitute/roberta_toxicity_classifier'
TOXICITY_MODEL_NAME_ONNX = 'Deepchecks/roberta_toxicity_classifier_onnx'
def _validate_onnx_model_availability(use_onnx_models: bool, device: Optional[str]):
if not use_onnx_models:
return False
if find_spec('optimum') is None or find_spec('onnxruntime') is None:
warnings.warn('Onnx models require the optimum[onnxruntime-gpu] library to be installed. '
'Calculating using the default models.')
return False
if not torch.cuda.is_available():
warnings.warn('GPU is required for the onnx models. Calculating using the default models.')
return False
if device is not None and device.lower() == 'cpu':
warnings.warn('Onnx models are not supported on device CPU. Calculating using the default models.')
return False
return True
def get_transformer_pipeline(
property_name: str,
model_name: str,
device: Optional[str] = None,
models_storage: Union[pathlib.Path, str, None] = None,
use_onnx_model: bool = False,
use_cache=False
):
"""Return a transformers' pipeline for the given model name."""
if use_onnx_model and 'onnx' not in model_name.lower():
raise ValueError("use_onnx_model=True, but model_name is not for a 'onnx' model")
if use_cache:
model, tokenizer = _get_transformer_model_and_tokenizer(property_name, model_name,
models_storage, use_onnx_model)
else:
# __wrapped__ is simply the function without decoration, in our case - without caching
model, tokenizer = _get_transformer_model_and_tokenizer.__wrapped__(property_name, model_name,
models_storage, use_onnx_model)
if use_onnx_model:
onnx_pipe = import_optional_property_dependency('optimum.pipelines', property_name=property_name)
return onnx_pipe.pipeline('text-classification', model=model, tokenizer=tokenizer,
accelerator='ort', device=device)
else:
transformers = import_optional_property_dependency('transformers', property_name=property_name)
return transformers.pipeline('text-classification', model=model, tokenizer=tokenizer, device=device)
The provided code snippet includes necessary dependencies for implementing the `toxicity` function. Write a Python function `def toxicity( text_batch: Sequence[str], device: Optional[str] = None, models_storage: Union[pathlib.Path, str, None] = None, use_onnx_models: bool = True, toxicity_classifier: Optional[object] = None ) -> Sequence[float]` to solve the following problem:
Return float representing toxicity.
Here is the function:
def toxicity(
text_batch: Sequence[str],
device: Optional[str] = None,
models_storage: Union[pathlib.Path, str, None] = None,
use_onnx_models: bool = True,
toxicity_classifier: Optional[object] = None
) -> Sequence[float]:
"""Return float representing toxicity."""
if toxicity_classifier is None:
use_onnx_models = _validate_onnx_model_availability(use_onnx_models, device)
model_name = TOXICITY_MODEL_NAME_ONNX if use_onnx_models else TOXICITY_MODEL_NAME
toxicity_classifier = get_transformer_pipeline(
property_name='toxicity', model_name=model_name, device=device,
models_storage=models_storage, use_onnx_model=use_onnx_models)
class UnitModel:
"""A model that does nothing."""
@staticmethod
def predict(x):
return x
try:
with open(TOXICITY_CALIBRATOR, 'rb') as f:
toxicity_calibrator = pkl.load(f)
except Exception: # pylint: disable=broad-except
toxicity_calibrator = UnitModel()
def output_formatter(v):
score = v['score'] if (v['label'] == 'toxic') else 1 - v['score']
return toxicity_calibrator.predict([score])[0]
return predict_on_batch(text_batch, toxicity_classifier, output_formatter) | Return float representing toxicity. |
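A usage sketch, hedged: it assumes the transformers package is installed and the toxicity model can be downloaded from the Hugging Face hub:
scores = toxicity(['thanks, that was really helpful', 'you are a complete idiot'],
                  device='cpu', use_onnx_models=False)
# one float per input text; higher values indicate more toxic text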
475 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
def predict_on_batch(text_batch: Sequence[str], classifier,
output_formatter: Callable[[Dict[str, Any]], float]) -> Sequence[float]:
"""Return prediction of huggingface Pipeline classifier."""
# TODO: make this way smarter, and not just a hack. Count tokens, for a start. Then not just sample sentences.
# If text is longer than classifier context window, sample it:
text_list_to_predict = []
reduced_batch_size = len(text_batch) # Initialize the reduced batch size
retry_count = 0
for text in text_batch:
if len(text) > MAX_CHARS:
sentences = _sample_for_property(text, mode='sentences', limit=10, return_as_list=True)
text_to_use = ''
for sentence in sentences:
if len(text_to_use) + len(sentence) > MAX_CHARS:
break
text_to_use += sentence + '. '
# if even one sentence is too long, use part of the first one:
if len(text_to_use) == 0:
if len(sentences) > 0:
text_to_use = cut_string(sentences[0], MAX_CHARS)
else:
text_to_use = None
text_list_to_predict.append(text_to_use)
else:
text_list_to_predict.append(text)
while reduced_batch_size >= 1:
try:
if reduced_batch_size == 1 or retry_count == 3:
results = []
for text in text_list_to_predict:
if text is None:
results.append(np.nan)
else:
try:
v = classifier(text)[0]
results.append(output_formatter(v))
except Exception: # pylint: disable=broad-except
results.append(np.nan)
return results # Return the results if prediction is successful
v_list = classifier(text_list_to_predict, batch_size=reduced_batch_size)
results = []
for v in v_list:
results.append(output_formatter(v))
return results # Return the results if prediction is successful
except Exception: # pylint: disable=broad-except
reduced_batch_size = max(reduced_batch_size // 2, 1) # Reduce the batch size by half
retry_count += 1
return [np.nan] * len(text_batch) # Prediction failed, return NaN values for the original batch size
FLUENCY_MODEL_NAME = 'prithivida/parrot_fluency_model'
FLUENCY_MODEL_NAME_ONNX = 'Deepchecks/parrot_fluency_model_onnx'
def _validate_onnx_model_availability(use_onnx_models: bool, device: Optional[str]):
if not use_onnx_models:
return False
if find_spec('optimum') is None or find_spec('onnxruntime') is None:
warnings.warn('Onnx models require the optimum[onnxruntime-gpu] library to be installed. '
'Calculating using the default models.')
return False
if not torch.cuda.is_available():
warnings.warn('GPU is required for the onnx models. Calculating using the default models.')
return False
if device is not None and device.lower() == 'cpu':
warnings.warn('Onnx models are not supported on device CPU. Calculating using the default models.')
return False
return True
def get_transformer_pipeline(
property_name: str,
model_name: str,
device: Optional[str] = None,
models_storage: Union[pathlib.Path, str, None] = None,
use_onnx_model: bool = False,
use_cache=False
):
"""Return a transformers' pipeline for the given model name."""
if use_onnx_model and 'onnx' not in model_name.lower():
raise ValueError("use_onnx_model=True, but model_name is not for a 'onnx' model")
if use_cache:
model, tokenizer = _get_transformer_model_and_tokenizer(property_name, model_name,
models_storage, use_onnx_model)
else:
# __wrapped__ is simply the function without decoration, in our case - without caching
model, tokenizer = _get_transformer_model_and_tokenizer.__wrapped__(property_name, model_name,
models_storage, use_onnx_model)
if use_onnx_model:
onnx_pipe = import_optional_property_dependency('optimum.pipelines', property_name=property_name)
return onnx_pipe.pipeline('text-classification', model=model, tokenizer=tokenizer,
accelerator='ort', device=device)
else:
transformers = import_optional_property_dependency('transformers', property_name=property_name)
return transformers.pipeline('text-classification', model=model, tokenizer=tokenizer, device=device)
The provided code snippet includes necessary dependencies for implementing the `fluency` function. Write a Python function `def fluency( text_batch: Sequence[str], device: Optional[str] = None, models_storage: Union[pathlib.Path, str, None] = None, use_onnx_models: bool = True, fluency_classifier: Optional[object] = None ) -> Sequence[float]` to solve the following problem:
Return float representing fluency.
Here is the function:
def fluency(
text_batch: Sequence[str],
device: Optional[str] = None,
models_storage: Union[pathlib.Path, str, None] = None,
use_onnx_models: bool = True,
fluency_classifier: Optional[object] = None
) -> Sequence[float]:
"""Return float representing fluency."""
if fluency_classifier is None:
use_onnx_models = _validate_onnx_model_availability(use_onnx_models, device)
model_name = FLUENCY_MODEL_NAME_ONNX if use_onnx_models else FLUENCY_MODEL_NAME
fluency_classifier = get_transformer_pipeline(
property_name='fluency', model_name=model_name, device=device,
models_storage=models_storage, use_onnx_model=use_onnx_models)
def output_formatter(v):
return v['score'] if v['label'] == 'LABEL_1' else 1 - v['score']
return predict_on_batch(text_batch, fluency_classifier, output_formatter) | Return float representing fluency. |
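A brief sketch under the same assumptions as the toxicity example (transformers installed, model downloadable):
fluency(['This sentence reads naturally.', 'reads naturally this sentence'],
        device='cpu', use_onnx_models=False)
# one score per text, taken from the LABEL_1 probability, so higher means more fluent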
476 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
def predict_on_batch(text_batch: Sequence[str], classifier,
output_formatter: Callable[[Dict[str, Any]], float]) -> Sequence[float]:
"""Return prediction of huggingface Pipeline classifier."""
# TODO: make this way smarter, and not just a hack. Count tokens, for a start. Then not just sample sentences.
# If text is longer than classifier context window, sample it:
text_list_to_predict = []
reduced_batch_size = len(text_batch) # Initialize the reduced batch size
retry_count = 0
for text in text_batch:
if len(text) > MAX_CHARS:
sentences = _sample_for_property(text, mode='sentences', limit=10, return_as_list=True)
text_to_use = ''
for sentence in sentences:
if len(text_to_use) + len(sentence) > MAX_CHARS:
break
text_to_use += sentence + '. '
# if even one sentence is too long, use part of the first one:
if len(text_to_use) == 0:
if len(sentences) > 0:
text_to_use = cut_string(sentences[0], MAX_CHARS)
else:
text_to_use = None
text_list_to_predict.append(text_to_use)
else:
text_list_to_predict.append(text)
while reduced_batch_size >= 1:
try:
if reduced_batch_size == 1 or retry_count == 3:
results = []
for text in text_list_to_predict:
if text is None:
results.append(np.nan)
else:
try:
v = classifier(text)[0]
results.append(output_formatter(v))
except Exception: # pylint: disable=broad-except
results.append(np.nan)
return results # Return the results if prediction is successful
v_list = classifier(text_list_to_predict, batch_size=reduced_batch_size)
results = []
for v in v_list:
results.append(output_formatter(v))
return results # Return the results if prediction is successful
except Exception: # pylint: disable=broad-except
reduced_batch_size = max(reduced_batch_size // 2, 1) # Reduce the batch size by half
retry_count += 1
return [np.nan] * len(text_batch) # Prediction failed, return NaN values for the original batch size
FORMALITY_MODEL_NAME = 's-nlp/roberta-base-formality-ranker'
FORMALITY_MODEL_NAME_ONNX = 'Deepchecks/roberta_base_formality_ranker_onnx'
def _validate_onnx_model_availability(use_onnx_models: bool, device: Optional[str]):
if not use_onnx_models:
return False
if find_spec('optimum') is None or find_spec('onnxruntime') is None:
warnings.warn('Onnx models require the optimum[onnxruntime-gpu] library to be installed. '
'Calculating using the default models.')
return False
if not torch.cuda.is_available():
warnings.warn('GPU is required for the onnx models. Calculating using the default models.')
return False
if device is not None and device.lower() == 'cpu':
warnings.warn('Onnx models are not supported on device CPU. Calculating using the default models.')
return False
return True
def get_transformer_pipeline(
property_name: str,
model_name: str,
device: Optional[str] = None,
models_storage: Union[pathlib.Path, str, None] = None,
use_onnx_model: bool = False,
use_cache=False
):
"""Return a transformers' pipeline for the given model name."""
if use_onnx_model and 'onnx' not in model_name.lower():
raise ValueError("use_onnx_model=True, but model_name is not for a 'onnx' model")
if use_cache:
model, tokenizer = _get_transformer_model_and_tokenizer(property_name, model_name,
models_storage, use_onnx_model)
else:
# __wrapped__ is simply the function without decoration, in our case - without caching
model, tokenizer = _get_transformer_model_and_tokenizer.__wrapped__(property_name, model_name,
models_storage, use_onnx_model)
if use_onnx_model:
onnx_pipe = import_optional_property_dependency('optimum.pipelines', property_name=property_name)
return onnx_pipe.pipeline('text-classification', model=model, tokenizer=tokenizer,
accelerator='ort', device=device)
else:
transformers = import_optional_property_dependency('transformers', property_name=property_name)
return transformers.pipeline('text-classification', model=model, tokenizer=tokenizer, device=device)
The provided code snippet includes necessary dependencies for implementing the `formality` function. Write a Python function `def formality( text_batch: Sequence[str], device: Optional[str] = None, models_storage: Union[pathlib.Path, str, None] = None, use_onnx_models: bool = True, formality_classifier: Optional[object] = None ) -> Sequence[float]` to solve the following problem:
Return float representing formality.
Here is the function:
def formality(
text_batch: Sequence[str],
device: Optional[str] = None,
models_storage: Union[pathlib.Path, str, None] = None,
use_onnx_models: bool = True,
formality_classifier: Optional[object] = None
) -> Sequence[float]:
"""Return float representing formality."""
if formality_classifier is None:
use_onnx_models = _validate_onnx_model_availability(use_onnx_models, device)
model_name = FORMALITY_MODEL_NAME_ONNX if use_onnx_models else FORMALITY_MODEL_NAME
formality_classifier = get_transformer_pipeline(
property_name='formality', model_name=model_name, device=device,
models_storage=models_storage, use_onnx_model=use_onnx_models)
def output_formatter(v):
return v['score'] if v['label'] == 'formal' else 1 - v['score']
return predict_on_batch(text_batch, formality_classifier, output_formatter) | Return float representing formality. |
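The same pattern applies to formality, again assuming the ranker model is available:
formality(['Dear Sir or Madam, I am writing to request an update.', 'hey whats up'],
          device='cpu', use_onnx_models=False)
# higher scores correspond to the 'formal' label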
477 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
words_cache = {}  # in-memory cache of tokenized words, keyed by hash_text
def _split_to_words_with_cache(text: str) -> List[str]:
"""Tokenize a text into words and cache the result."""
hash_key = hash_text(text)
if hash_key not in words_cache:
words = re.split(r'\W+', normalize_text(text, remove_stops=False, ignore_whitespace=False))
words = [w for w in words if w] # remove empty strings
words_cache[hash_key] = words
return words_cache[hash_key]
def _warn_if_missing_nltk_dependencies(dependency: str, property_name: str):
"""Warn if NLTK dependency is missing."""
warnings.warn(f'NLTK {dependency} not found, {property_name} cannot be calculated.'
' Please check your internet connection.', UserWarning)
The provided code snippet includes necessary dependencies for implementing the `lexical_density` function. Write a Python function `def lexical_density(text: str) -> float` to solve the following problem:
Return a float representing lexical density. Lexical density is the percentage of unique words in a given text. For more information: https://en.wikipedia.org/wiki/Lexical_density
Here is the function:
def lexical_density(text: str) -> float:
"""Return a float representing lexical density.
Lexical density is the percentage of unique words in a given text. For more
information: https://en.wikipedia.org/wiki/Lexical_density
"""
if pd.isna(text):
return np.nan
if not nltk_download('punkt', quiet=True):
_warn_if_missing_nltk_dependencies('punkt', 'Lexical Density')
return np.nan
all_words = _split_to_words_with_cache(text)
if len(all_words) == 0:
return np.nan
total_unique_words = len(set(all_words))
return round(total_unique_words / len(all_words), 2) | Return a float representing lexical density. Lexical density is the percentage of unique words in a given text. For more information: https://en.wikipedia.org/wiki/Lexical_density |
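A worked example (requires the NLTK punkt resource; otherwise the function returns NaN):
lexical_density('the cat sat on the mat')  # -> 0.83, five unique words out of six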
478 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
def _warn_if_missing_nltk_dependencies(dependency: str, property_name: str):
"""Warn if NLTK dependency is missing."""
warnings.warn(f'NLTK {dependency} not found, {property_name} cannot be calculated.'
' Please check your internet connection.', UserWarning)
The provided code snippet includes necessary dependencies for implementing the `unique_noun_count` function. Write a Python function `def unique_noun_count(text: Sequence[str]) -> int` to solve the following problem:
Return the number of unique noun words in the text.
Here is the function:
def unique_noun_count(text: Sequence[str]) -> int:
"""Return the number of unique noun words in the text."""
if pd.isna(text):
return np.nan
if not nltk_download('averaged_perceptron_tagger', quiet=True):
_warn_if_missing_nltk_dependencies('averaged_perceptron_tagger', 'Unique Noun Count')
return np.nan
unique_words_with_tags = set(textblob.TextBlob(text).tags)
return sum(1 for (_, tag) in unique_words_with_tags if tag.startswith('N')) | Return the number of unique noun words in the text. |
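A hedged usage sketch of the function above; it requires the NLTK 'averaged_perceptron_tagger' resource to be downloadable, and the expected value is an assumption about how the tagger labels this sentence.

print(unique_noun_count('The cat chased the cat while the dog slept.'))
# Expected to be 2 ('cat' and 'dog'), assuming both are tagged with an N* part-of-speech tag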
479 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
DEFAULT_SENTENCE_SAMPLE_SIZE = 300
words_cache = {}
sentences_cache = {}
def _split_to_sentences_with_cache(text: str) -> Union[List[str], None]:
    """Tokenize a text into sentences and cache the result."""
    hash_key = hash_text(text)
    if hash_key not in sentences_cache:
        if not nltk_download('punkt', quiet=True):
            _warn_if_missing_nltk_dependencies('punkt', 'property')
            return None
        sentences_cache[hash_key] = sent_tokenize(text)
    return sentences_cache[hash_key]
def _split_to_words_with_cache(text: str) -> List[str]:
"""Tokenize a text into words and cache the result."""
hash_key = hash_text(text)
if hash_key not in words_cache:
words = re.split(r'\W+', normalize_text(text, remove_stops=False, ignore_whitespace=False))
words = [w for w in words if w] # remove empty strings
words_cache[hash_key] = words
return words_cache[hash_key]
def _sample_for_property(text: str, mode: str = 'words', limit: int = 10000, return_as_list=False,
random_seed: int = 42) -> Union[str, List[str]]:
"""Get a sample a single text sample for a text property.
Parameters
----------
text : str
The text to sample from.
mode : str, default 'words'
The mode to sample in. Can be either 'words' or 'sentences'.
limit : int, default 10000
The maximum number of words or sentences to sample.
"""
np.random.seed(random_seed)
if pd.isna(text):
return None
if mode == 'words':
all_units = _split_to_words_with_cache(text)
if len(all_units) > limit:
all_units = np.random.choice(all_units, size=limit, replace=False)
elif mode == 'sentences':
all_units = _split_to_sentences_with_cache(text)
if len(all_units) > limit:
all_units = np.random.choice(all_units, size=limit, replace=False)
else:
raise DeepchecksValueError(f'Unexpected mode - {mode}')
return ' '.join(all_units) if not return_as_list else list(all_units)
def _warn_if_missing_nltk_dependencies(dependency: str, property_name: str):
"""Warn if NLTK dependency is missing."""
warnings.warn(f'NLTK {dependency} not found, {property_name} cannot be calculated.'
' Please check your internet connection.', UserWarning)
The provided code snippet includes necessary dependencies for implementing the `readability_score` function. Write a Python function `def readability_score(text: str, cmudict_dict: dict = None) -> float` to solve the following problem:
Return a float representing the Flesch Reading-Ease score per text sample. In the Flesch reading-ease test, higher scores indicate material that is easier to read whereas lower numbers mark texts that are more difficult to read. For more information: https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests#Flesch_reading_ease
Here is the function:
def readability_score(text: str, cmudict_dict: dict = None) -> float:
"""Return a float representing the Flesch Reading-Ease score per text sample.
In the Flesch reading-ease test, higher scores indicate material that is easier to read
whereas lower numbers mark texts that are more difficult to read. For more information:
https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests#Flesch_reading_ease
"""
if pd.isna(text):
return np.nan
if cmudict_dict is None:
if not nltk_download('cmudict', quiet=True):
_warn_if_missing_nltk_dependencies('cmudict', 'Reading Ease')
return np.nan
cmudict_dict = corpus.cmudict.dict()
text_sentences = _sample_for_property(text, mode='sentences', limit=DEFAULT_SENTENCE_SAMPLE_SIZE,
return_as_list=True)
sentence_count = len(text_sentences)
words = _split_to_words_with_cache(text)
word_count = len(words)
syllable_count = sum([len(cmudict_dict[word]) for word in words if word in cmudict_dict])
if word_count != 0 and sentence_count != 0 and syllable_count != 0:
avg_syllables_per_word = syllable_count / word_count
avg_words_per_sentence = word_count / sentence_count
flesch_reading_ease = 206.835 - (1.015 * avg_words_per_sentence) - (84.6 * avg_syllables_per_word)
return round(flesch_reading_ease, 3)
else:
return np.nan | Return a float representing the Flesch Reading-Ease score per text sample. In the Flesch reading-ease test, higher scores indicate material that is easier to read whereas lower numbers mark texts that are more difficult to read. For more information: https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests#Flesch_reading_ease |
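A worked example of the Flesch formula used above, with illustrative counts rather than values measured from any real text.

word_count, sentence_count, syllable_count = 100, 5, 150  # illustrative counts
avg_words_per_sentence = word_count / sentence_count   # 20.0
avg_syllables_per_word = syllable_count / word_count    # 1.5
score = 206.835 - (1.015 * avg_words_per_sentence) - (84.6 * avg_syllables_per_word)
print(round(score, 3))  # 59.635 -> roughly 'fairly difficult to read' on the usual Flesch scale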
480 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
DEFAULT_SENTENCE_SAMPLE_SIZE = 300
words_cache = {}
sentences_cache = {}
def _warn_if_missing_nltk_dependencies(dependency: str, property_name: str):
    """Warn if NLTK dependency is missing."""
    warnings.warn(f'NLTK {dependency} not found, {property_name} cannot be calculated.'
                  ' Please check your internet connection.', UserWarning)
def _split_to_sentences_with_cache(text: str) -> Union[List[str], None]:
    """Tokenize a text into sentences and cache the result."""
    hash_key = hash_text(text)
    if hash_key not in sentences_cache:
        if not nltk_download('punkt', quiet=True):
            _warn_if_missing_nltk_dependencies('punkt', 'property')
            return None
        sentences_cache[hash_key] = sent_tokenize(text)
    return sentences_cache[hash_key]
def _split_to_words_with_cache(text: str) -> List[str]:
"""Tokenize a text into words and cache the result."""
hash_key = hash_text(text)
if hash_key not in words_cache:
words = re.split(r'\W+', normalize_text(text, remove_stops=False, ignore_whitespace=False))
words = [w for w in words if w] # remove empty strings
words_cache[hash_key] = words
return words_cache[hash_key]
def _sample_for_property(text: str, mode: str = 'words', limit: int = 10000, return_as_list=False,
random_seed: int = 42) -> Union[str, List[str]]:
"""Get a sample a single text sample for a text property.
Parameters
----------
text : str
The text to sample from.
mode : str, default 'words'
The mode to sample in. Can be either 'words' or 'sentences'.
limit : int, default 10000
The maximum number of words or sentences to sample.
"""
np.random.seed(random_seed)
if pd.isna(text):
return None
if mode == 'words':
all_units = _split_to_words_with_cache(text)
if len(all_units) > limit:
all_units = np.random.choice(all_units, size=limit, replace=False)
elif mode == 'sentences':
all_units = _split_to_sentences_with_cache(text)
if len(all_units) > limit:
all_units = np.random.choice(all_units, size=limit, replace=False)
else:
raise DeepchecksValueError(f'Unexpected mode - {mode}')
return ' '.join(all_units) if not return_as_list else list(all_units)
def remove_punctuation(text: str) -> str:
"""Remove punctuation characters from a string."""
return text.translate(str.maketrans('', '', string.punctuation))
The provided code snippet includes necessary dependencies for implementing the `average_words_per_sentence` function. Write a Python function `def average_words_per_sentence(text: str) -> float` to solve the following problem:
Return the average words per sentence in the text.
Here is the function:
def average_words_per_sentence(text: str) -> float:
"""Return the average words per sentence in the text."""
if pd.isna(text):
return np.nan
text_sentences = _sample_for_property(text, mode='sentences', limit=DEFAULT_SENTENCE_SAMPLE_SIZE,
return_as_list=True)
if text_sentences:
text_sentences = [remove_punctuation(sent) for sent in text_sentences]
total_words = sum([len(_split_to_words_with_cache(sentence)) for sentence in text_sentences])
return round(total_words / len(text_sentences), 3)
else:
return np.nan | Return the average words per sentence in the text. |
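An illustrative call of the function above, assuming the NLTK 'punkt' resources are available for the sentence split.

print(average_words_per_sentence('Short sentence. This one has five words.'))
# 3.5 -> (2 + 5) words over 2 sentences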
481 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
The provided code snippet includes necessary dependencies for implementing the `unique_urls_count` function. Write a Python function `def unique_urls_count(text: str) -> int` to solve the following problem:
Return the number of unique URLs in the text.
Here is the function:
def unique_urls_count(text: str) -> int:
"""Return the number of unique URLS in the text."""
if pd.isna(text):
return np.nan
url_pattern = r'https?:\/\/(?:[-\w.]|(?:%[\da-fA-F]{2}))+'
    return len(set(re.findall(url_pattern, text))) | Return the number of unique URLs in the text. |
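A quick illustration of the URL pattern above; note that its character class contains no '/', so each match stops at the host part.

sample = 'Links: https://example.com, http://example.org and https://example.com again.'
print(unique_urls_count(sample))  # 2 -> example.com appears twice but is counted once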
482 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
The provided code snippet includes necessary dependencies for implementing the `urls_count` function. Write a Python function `def urls_count(text: str) -> int` to solve the following problem:
Return the number of URLs in the text.
Here is the function:
def urls_count(text: str) -> int:
"""Return the number of URLS in the text."""
if pd.isna(text):
return np.nan
url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+'
    return len(re.findall(url_pattern, text)) | Return the number of URLs in the text. |
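An illustrative call of the function above; every occurrence is counted, and matches are truncated at the first '/' because the pattern's character class excludes it.

print(urls_count('Read https://example.com/a and https://example.com/b.'))
# 2 matches, both truncated to 'https://example.com'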
483 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
The provided code snippet includes necessary dependencies for implementing the `unique_email_addresses_count` function. Write a Python function `def unique_email_addresses_count(text: str) -> int` to solve the following problem:
Return the number of unique email addresses in the text.
Here is the function:
def unique_email_addresses_count(text: str) -> int:
"""Return the number of unique email addresses in the text."""
if pd.isna(text):
return np.nan
email_pattern = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b'
return len(set(re.findall(email_pattern, text))) | Return the number of unique email addresses in the text. |
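An illustrative call of the function above; duplicate addresses are collapsed by the set().

text = 'Contact a@example.com or b@example.com; a@example.com also works.'
print(unique_email_addresses_count(text))  # 2 distinct addresses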
484 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
The provided code snippet includes necessary dependencies for implementing the `email_addresses_count` function. Write a Python function `def email_addresses_count(text: str) -> int` to solve the following problem:
Return the number of email addresses in the text.
Here is the function:
def email_addresses_count(text: str) -> int:
"""Return the number of email addresses in the text."""
if pd.isna(text):
return np.nan
email_pattern = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b'
return len(re.findall(email_pattern, text)) | Return the number of email addresses in the text. |
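An illustrative call of the function above; unlike the unique variant, every occurrence is counted.

print(email_addresses_count('Write to a@example.com, b@example.com or a@example.com.'))  # 3 matches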
485 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
def _warn_if_missing_nltk_dependencies(dependency: str, property_name: str):
"""Warn if NLTK dependency is missing."""
warnings.warn(f'NLTK {dependency} not found, {property_name} cannot be calculated.'
' Please check your internet connection.', UserWarning)
def remove_punctuation(text: str) -> str:
"""Remove punctuation characters from a string."""
return text.translate(str.maketrans('', '', string.punctuation))
The provided code snippet includes necessary dependencies for implementing the `unique_syllables_count` function. Write a Python function `def unique_syllables_count(text: str, cmudict_dict: dict = None) -> int` to solve the following problem:
Return the number of unique syllables in the text.
Here is the function:
def unique_syllables_count(text: str, cmudict_dict: dict = None) -> int:
"""Return the number of unique syllables in the text."""
if pd.isna(text):
return np.nan
if not nltk_download('punkt', quiet=True):
_warn_if_missing_nltk_dependencies('punkt', 'Unique Syllables Count')
return np.nan
if cmudict_dict is None:
if not nltk_download('cmudict', quiet=True):
_warn_if_missing_nltk_dependencies('cmudict', 'Unique Syllables Count')
return np.nan
cmudict_dict = corpus.cmudict.dict()
text = remove_punctuation(text.lower())
words = word_tokenize(text)
syllables = {word: True for word in words if word in cmudict_dict}
return len(syllables) | Return the number of unique syllables in the text. |
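A hedged usage sketch of the function above; it needs the NLTK 'punkt' and 'cmudict' downloads to succeed, and the value depends on which words appear in the CMU pronouncing dictionary.

print(unique_syllables_count('The quick brown fox jumps over the lazy dog'))
# Prints the number of distinct lower-cased words from the sentence that are found in cmudict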
486 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
The provided code snippet includes necessary dependencies for implementing the `reading_time` function. Write a Python function `def reading_time(text: str) -> int` to solve the following problem:
Return an integer representing time in seconds to read the text. The formula is based on Demberg & Keller, 2008 where it is assumed that reading a character takes 14.69 milliseconds on average.
Here is the function:
def reading_time(text: str) -> int:
"""Return an integer representing time in seconds to read the text.
The formula is based on Demberg & Keller, 2008 where it is assumed that
    reading a character takes 14.69 milliseconds on average.
"""
if pd.isna(text):
return np.nan
ms_per_char = 14.69
words = text.split()
nchars = map(len, words)
rt_per_word = map(lambda nchar: nchar * ms_per_char, nchars)
ms_reading_time = sum(list(rt_per_word))
    return round(ms_reading_time / 1000, 2) | Return an integer representing time in seconds to read the text. The formula is based on Demberg & Keller, 2008 where it is assumed that reading a character takes 14.69 milliseconds on average. |
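A worked example of the 14.69 ms-per-character estimate above; whitespace is excluded because only the split words are measured.

# 'hello world' -> 5 + 5 = 10 characters, 10 * 14.69 ms = 146.9 ms ~= 0.15 seconds
print(reading_time('hello world'))  # 0.15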
487 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
sentences_cache = {}
def _split_to_sentences_with_cache(text: str) -> Union[List[str], None]:
"""Tokenize a text into sentences and cache the result."""
hash_key = hash_text(text)
if hash_key not in sentences_cache:
if not nltk_download('punkt', quiet=True):
_warn_if_missing_nltk_dependencies('punkt', 'property')
return None
sentences_cache[hash_key] = sent_tokenize(text)
return sentences_cache[hash_key]
def _warn_if_missing_nltk_dependencies(dependency: str, property_name: str):
"""Warn if NLTK dependency is missing."""
warnings.warn(f'NLTK {dependency} not found, {property_name} cannot be calculated.'
' Please check your internet connection.', UserWarning)
The provided code snippet includes necessary dependencies for implementing the `sentences_count` function. Write a Python function `def sentences_count(text: str) -> int` to solve the following problem:
Return the number of sentences in the text.
Here is the function:
def sentences_count(text: str) -> int:
"""Return the number of sentences in the text."""
if pd.isna(text):
return np.nan
if not nltk_download('punkt', quiet=True):
_warn_if_missing_nltk_dependencies('punkt', 'Sentences Count')
return np.nan
return len(_split_to_sentences_with_cache(text)) | Return the number of sentences in the text. |
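An illustrative call of the function above, assuming the NLTK 'punkt' download succeeds.

print(sentences_count('First sentence. Second one! And a third?'))  # 3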
488 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
sentences_cache = {}
def _split_to_sentences_with_cache(text: str) -> Union[List[str], None]:
"""Tokenize a text into sentences and cache the result."""
hash_key = hash_text(text)
if hash_key not in sentences_cache:
if not nltk_download('punkt', quiet=True):
_warn_if_missing_nltk_dependencies('punkt', 'property')
return None
sentences_cache[hash_key] = sent_tokenize(text)
return sentences_cache[hash_key]
def _warn_if_missing_nltk_dependencies(dependency: str, property_name: str):
"""Warn if NLTK dependency is missing."""
warnings.warn(f'NLTK {dependency} not found, {property_name} cannot be calculated.'
' Please check your internet connection.', UserWarning)
def remove_punctuation(text: str) -> str:
"""Remove punctuation characters from a string."""
return text.translate(str.maketrans('', '', string.punctuation))
The provided code snippet includes necessary dependencies for implementing the `average_syllable_length` function. Write a Python function `def average_syllable_length(text: str, cmudict_dict: dict = None) -> float` to solve the following problem:
Return the average number of syllables per sentence per text sample.
Here is the function:
def average_syllable_length(text: str, cmudict_dict: dict = None) -> float:
"""Return a the average number of syllables per sentences per text sample."""
if pd.isna(text):
return np.nan
if not nltk_download('punkt', quiet=True):
_warn_if_missing_nltk_dependencies('punkt', 'Average Syllable Length')
return np.nan
if cmudict_dict is None:
if not nltk_download('cmudict', quiet=True):
_warn_if_missing_nltk_dependencies('cmudict', 'Average Syllable Length')
return np.nan
cmudict_dict = corpus.cmudict.dict()
sentence_count = len(_split_to_sentences_with_cache(text))
text = remove_punctuation(text.lower())
words = word_tokenize(text)
syllable_count = sum([len(cmudict_dict[word]) for word in words if word in cmudict_dict])
    return round(syllable_count / sentence_count, 2) | Return the average number of syllables per sentence per text sample. |
489 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
textblob_cache = {}
words_cache = {}
sentences_cache = {}
def _warn_if_missing_nltk_dependencies(dependency: str, property_name: str):
"""Warn if NLTK dependency is missing."""
warnings.warn(f'NLTK {dependency} not found, {property_name} cannot be calculated.'
' Please check your internet connection.', UserWarning)
def language(
text: str,
lang_certainty_threshold: float = 0.8,
fasttext_model: Optional[Dict[object, Any]] = None
) -> Union[str, None]:
"""Return text language, represented as a string."""
if not text:
return None
# Load the model if it wasn't received as a parameter. This is done to avoid loading the model
# each time the function is called.
if fasttext_model is None:
fasttext_model = get_fasttext_model()
# Predictions are the first prediction (k=1), only if the probability is above the threshold
prediction = fasttext_model.predict(text.replace('\n', ' '), k=1, threshold=lang_certainty_threshold)[0]
# label is empty for detection below threshold:
language_code = prediction[0].replace('__label__', '') if prediction else None
if language_code == 'eng': # both are english but different labels
return 'en'
return language_code
TOXICITY_MODEL_NAME = 'SkolkovoInstitute/roberta_toxicity_classifier'
TOXICITY_MODEL_NAME_ONNX = 'Deepchecks/roberta_toxicity_classifier_onnx'
FLUENCY_MODEL_NAME = 'prithivida/parrot_fluency_model'
FLUENCY_MODEL_NAME_ONNX = 'Deepchecks/parrot_fluency_model_onnx'
FORMALITY_MODEL_NAME = 's-nlp/roberta-base-formality-ranker'
FORMALITY_MODEL_NAME_ONNX = 'Deepchecks/roberta_base_formality_ranker_onnx'
def _batch_wrapper(text_batch: Sequence[str], func: Callable, **kwargs) -> List[Any]:
"""Wrap the non-batched properties execution with batches API."""
results = []
language_property_result = []
if 'language_property_result' in kwargs:
language_property_result = kwargs.pop('language_property_result')
language_property_exists = len(language_property_result) > 0
for i, text in enumerate(text_batch):
kwargs['language_property_result'] = language_property_result[i] if language_property_exists else None
results.append(run_available_kwargs(func, text=text, **kwargs))
return results
BATCH_PROPERTIES = ('Toxicity', 'Fluency', 'Formality')
ENGLISH_ONLY_PROPERTIES = (
'Sentiment', 'Subjectivity', 'Toxicity', 'Fluency', 'Formality', 'Reading Ease',
'Unique Noun Count', 'Unique Syllables Count', 'Sentences Count', 'Average Syllable Length'
)
CMUDICT_PROPERTIES = ('Average Syllable Length', 'Unique Syllables Count', 'Reading Ease')
def _select_properties(
include_properties: Optional[List[str]] = None,
ignore_properties: Optional[List[str]] = None,
include_long_calculation_properties: bool = False,
) -> Sequence[TextProperty]:
"""Select properties to calculate based on provided parameters."""
if include_properties is not None and ignore_properties is not None:
raise ValueError('Cannot use properties and ignore_properties parameters together.')
if include_properties is not None:
if not is_sequence_not_str(include_properties) \
and not all(isinstance(prop, str) for prop in include_properties):
raise DeepchecksValueError('include_properties must be a sequence of strings.')
if ignore_properties is not None:
if not is_sequence_not_str(ignore_properties) \
and not all(isinstance(prop, str) for prop in ignore_properties):
raise DeepchecksValueError('ignore_properties must be a sequence of strings.')
include_properties = [prop.lower() for prop in include_properties] if include_properties else None
ignore_properties = [prop.lower() for prop in ignore_properties] if ignore_properties else None
if include_properties is not None:
properties = [prop for prop in ALL_PROPERTIES if
prop['name'].lower() in include_properties] # pylint: disable=unsupported-membership-test
if len(properties) < len(include_properties):
not_found_properties = sorted(set(include_properties) - set(prop['name'].lower() for prop in properties))
raise DeepchecksValueError('include_properties contains properties that were not found: '
f'{not_found_properties}.')
elif ignore_properties is not None:
properties = [prop for prop in DEFAULT_PROPERTIES if
prop['name'].lower() not in ignore_properties] # pylint: disable=unsupported-membership-test
if len(properties) + len(ignore_properties) != len(DEFAULT_PROPERTIES):
default_property_names = [prop['name'].lower() for prop in DEFAULT_PROPERTIES]
not_found_properties = [prop for prop in list(ignore_properties) if prop not in default_property_names]
raise DeepchecksValueError('ignore_properties contains properties that were not found: '
f'{not_found_properties}.')
else:
properties = DEFAULT_PROPERTIES
# include_long_calculation_properties is only applicable when include_properties is None
if include_properties is None and not include_long_calculation_properties:
return [
prop for prop in properties
if prop['name'] not in LONG_RUN_PROPERTIES
]
return properties
def _warn_long_compute(device, properties_types, n_samples, use_onnx_models):
heavy_properties = [prop for prop in properties_types.keys() if prop in LONG_RUN_PROPERTIES]
if len(heavy_properties) and n_samples > LARGE_SAMPLE_SIZE:
warning_message = (
f'Calculating the properties {heavy_properties} on a large dataset may take a long time. '
'Consider using a smaller sample size or running this code on better hardware.'
)
if device == 'cpu' or (device is None and not use_onnx_models):
warning_message += ' Consider using a GPU or a similar device to run these properties.'
warnings.warn(warning_message, UserWarning)
def _validate_onnx_model_availability(use_onnx_models: bool, device: Optional[str]):
if not use_onnx_models:
return False
if find_spec('optimum') is None or find_spec('onnxruntime') is None:
warnings.warn('Onnx models require the optimum[onnxruntime-gpu] library to be installed. '
'Calculating using the default models.')
return False
if not torch.cuda.is_available():
warnings.warn('GPU is required for the onnx models. Calculating using the default models.')
return False
if device is not None and device.lower() == 'cpu':
warnings.warn('Onnx models are not supported on device CPU. Calculating using the default models.')
return False
return True
def get_transformer_pipeline(
property_name: str,
model_name: str,
device: Optional[str] = None,
models_storage: Union[pathlib.Path, str, None] = None,
use_onnx_model: bool = False,
use_cache=False
):
"""Return a transformers' pipeline for the given model name."""
if use_onnx_model and 'onnx' not in model_name.lower():
raise ValueError("use_onnx_model=True, but model_name is not for a 'onnx' model")
if use_cache:
model, tokenizer = _get_transformer_model_and_tokenizer(property_name, model_name,
models_storage, use_onnx_model)
else:
# __wrapped__ is simply the function without decoration, in our case - without caching
model, tokenizer = _get_transformer_model_and_tokenizer.__wrapped__(property_name, model_name,
models_storage, use_onnx_model)
if use_onnx_model:
onnx_pipe = import_optional_property_dependency('optimum.pipelines', property_name=property_name)
return onnx_pipe.pipeline('text-classification', model=model, tokenizer=tokenizer,
accelerator='ort', device=device)
else:
transformers = import_optional_property_dependency('transformers', property_name=property_name)
return transformers.pipeline('text-classification', model=model, tokenizer=tokenizer, device=device)
def get_cmudict_dict(use_cache=False):
"""Return corpus as dict."""
if use_cache:
return _get_cmudict_dict()
return _get_cmudict_dict.__wrapped__()
def get_fasttext_model(models_storage: Union[pathlib.Path, str, None] = None, use_cache=False):
"""Return fasttext model."""
if use_cache:
return _get_fasttext_model(models_storage)
return _get_fasttext_model.__wrapped__(models_storage)
from inspect import signature
def run_available_kwargs(func: Callable, **kwargs):
"""Run the passed object only with available kwargs."""
avail_kwargs = list(signature(func).parameters.keys())
pass_kwargs = {}
for kwarg_name in avail_kwargs:
if kwarg_name in kwargs:
pass_kwargs[kwarg_name] = kwargs[kwarg_name]
return func(**pass_kwargs)
import typing as t
from typing import Hashable
def format_list(l: t.List[Hashable], max_elements_to_show: int = 10, max_string_length: int = 40) -> str:
"""Format columns properties for display in condition name.
Parameters
----------
l : List
list to print.
max_elements_to_show : int , default: 10
max elements to print before terminating.
max_string_length : int , default: 40
max string length before terminating.
Returns
-------
str
String of beautified list
"""
string_list = [str(i) for i in l[:max_elements_to_show]]
output = ', '.join(string_list)
if len(output) > max_string_length:
return output[:max_string_length] + '...'
if len(l) > max_elements_to_show:
return output + ', ...'
return output
The provided code snippet includes necessary dependencies for implementing the `calculate_builtin_properties` function. Write a Python function `def calculate_builtin_properties( raw_text: Sequence[str], include_properties: Optional[List[str]] = None, ignore_properties: Optional[List[str]] = None, include_long_calculation_properties: bool = False, ignore_non_english_samples_for_english_properties: bool = True, device: Optional[str] = None, models_storage: Union[pathlib.Path, str, None] = None, batch_size: Optional[int] = 16, cache_models: bool = False, use_onnx_models: bool = True, ) -> Tuple[Dict[str, List[float]], Dict[str, str]]` to solve the following problem:
Calculate properties on provided text samples. Parameters ---------- raw_text : Sequence[str] The text to calculate the properties for. include_properties : List[str], default None The properties to calculate. If None, all default properties will be calculated. Cannot be used together with ignore_properties parameter. Available properties are: ['Text Length', 'Average Word Length', 'Max Word Length', '% Special Characters', '% Punctuation', 'Language', 'Sentiment', 'Subjectivity', 'Toxicity', 'Fluency', 'Formality', 'Lexical Density', 'Unique Noun Count', 'Reading Ease', 'Average Words Per Sentence', 'URLs Count', Unique URLs Count', 'Email Address Count', 'Unique Email Address Count', 'Unique Syllables Count', 'Reading Time', 'Sentences Count', 'Average Syllable Length'] List of default properties are: ['Text Length', 'Average Word Length', 'Max Word Length', '% Special Characters', '% Punctuation', 'Language', 'Sentiment', 'Subjectivity', 'Toxicity', 'Fluency', 'Formality', 'Lexical Density', 'Unique Noun Count', 'Reading Ease', 'Average Words Per Sentence'] To calculate all the default properties, the include_properties and ignore_properties parameters should be None. If you pass either include_properties or ignore_properties then only the properties specified in the list will be calculated or ignored. Note that the properties ['Toxicity', 'Fluency', 'Formality', 'Language', 'Unique Noun Count'] may take a long time to calculate. If include_long_calculation_properties is False, these properties will be ignored, even if they are in the include_properties parameter. ignore_properties : List[str], default None The properties to ignore from the list of default properties. If None, no properties will be ignored and all the default properties will be calculated. Cannot be used together with include_properties parameter. include_long_calculation_properties : bool, default False Whether to include properties that may take a long time to calculate. If False, these properties will be ignored, unless they are specified in the include_properties parameter explicitly. ignore_non_english_samples_for_english_properties : bool, default True Whether to ignore samples that are not in English when calculating English properties. If False, samples that are not in English will be calculated as well. This parameter is ignored when calculating non-English properties. English-Only properties WILL NOT work properly on non-English samples, and this parameter should be used only when you are sure that all the samples are in English. device : Optional[str], default None The device to use for the calculation. If None, the default device will be used. For onnx based models it is recommended to set device to None for optimized performance. models_storage : Union[str, pathlib.Path, None], default None A directory to store the models. If not provided, models will be stored in `DEEPCHECKS_LIB_PATH/nlp/.nlp-models`. Also, if a folder already contains relevant resources they are not re-downloaded. batch_size : int, default 8 The batch size. cache_models : bool, default False If True, will store the models in device RAM memory. This will speed up the calculation for future calls. use_onnx_models : bool, default True If True, will use onnx gpu optimized models for the calculation. Requires the optimum[onnxruntime-gpu] library to be installed as well as the availability of GPU. Returns ------- Dict[str, List[float]] A dictionary with the property name as key and a list of the property values for each text as value. 
Dict[str, str] A dictionary with the property name as key and the property's type as value.
Here is the function:
def calculate_builtin_properties(
raw_text: Sequence[str],
include_properties: Optional[List[str]] = None,
ignore_properties: Optional[List[str]] = None,
include_long_calculation_properties: bool = False,
ignore_non_english_samples_for_english_properties: bool = True,
device: Optional[str] = None,
models_storage: Union[pathlib.Path, str, None] = None,
batch_size: Optional[int] = 16,
cache_models: bool = False,
use_onnx_models: bool = True,
) -> Tuple[Dict[str, List[float]], Dict[str, str]]:
"""Calculate properties on provided text samples.
Parameters
----------
raw_text : Sequence[str]
The text to calculate the properties for.
include_properties : List[str], default None
The properties to calculate. If None, all default properties will be calculated. Cannot be used
together with ignore_properties parameter. Available properties are:
['Text Length', 'Average Word Length', 'Max Word Length', '% Special Characters', '% Punctuation', 'Language',
'Sentiment', 'Subjectivity', 'Toxicity', 'Fluency', 'Formality', 'Lexical Density', 'Unique Noun Count',
    'Reading Ease', 'Average Words Per Sentence', 'URLs Count', 'Unique URLs Count', 'Email Addresses Count',
    'Unique Email Addresses Count', 'Unique Syllables Count', 'Reading Time', 'Sentences Count',
'Average Syllable Length']
List of default properties are: ['Text Length', 'Average Word Length', 'Max Word Length',
'% Special Characters', '% Punctuation', 'Language', 'Sentiment', 'Subjectivity', 'Toxicity', 'Fluency',
'Formality', 'Lexical Density', 'Unique Noun Count', 'Reading Ease', 'Average Words Per Sentence']
To calculate all the default properties, the include_properties and ignore_properties parameters should
be None. If you pass either include_properties or ignore_properties then only the properties specified
in the list will be calculated or ignored.
Note that the properties ['Toxicity', 'Fluency', 'Formality', 'Language', 'Unique Noun Count'] may
take a long time to calculate. If include_long_calculation_properties is False, these properties will be
ignored, even if they are in the include_properties parameter.
ignore_properties : List[str], default None
The properties to ignore from the list of default properties. If None, no properties will be ignored and
all the default properties will be calculated. Cannot be used together with include_properties parameter.
include_long_calculation_properties : bool, default False
Whether to include properties that may take a long time to calculate. If False, these properties will be
ignored, unless they are specified in the include_properties parameter explicitly.
ignore_non_english_samples_for_english_properties : bool, default True
Whether to ignore samples that are not in English when calculating English properties. If False, samples
that are not in English will be calculated as well. This parameter is ignored when calculating non-English
properties.
English-Only properties WILL NOT work properly on non-English samples, and this parameter should be used
only when you are sure that all the samples are in English.
device : Optional[str], default None
The device to use for the calculation. If None, the default device will be used. For onnx based models it is
recommended to set device to None for optimized performance.
models_storage : Union[str, pathlib.Path, None], default None
A directory to store the models.
If not provided, models will be stored in `DEEPCHECKS_LIB_PATH/nlp/.nlp-models`.
Also, if a folder already contains relevant resources they are not re-downloaded.
    batch_size : int, default 16
The batch size.
cache_models : bool, default False
If True, will store the models in device RAM memory. This will speed up the calculation for future calls.
use_onnx_models : bool, default True
If True, will use onnx gpu optimized models for the calculation. Requires the optimum[onnxruntime-gpu] library
to be installed as well as the availability of GPU.
Returns
-------
Dict[str, List[float]]
A dictionary with the property name as key and a list of the property values for each text as value.
Dict[str, str]
A dictionary with the property name as key and the property's type as value.
"""
use_onnx_models = _validate_onnx_model_availability(use_onnx_models, device)
text_properties = _select_properties(
include_properties=include_properties,
ignore_properties=ignore_properties,
include_long_calculation_properties=include_long_calculation_properties
)
properties_types = {
it['name']: it['output_type']
for it in text_properties
}
_warn_long_compute(device, properties_types, len(raw_text), use_onnx_models)
kwargs = dict(device=device, models_storage=models_storage)
calculated_properties = {k: [] for k in properties_types.keys()}
# Prepare kwargs for properties that require outside resources:
kwargs['fasttext_model'] = get_fasttext_model(models_storage=models_storage, use_cache=cache_models)
properties_requiring_cmudict = list(set(CMUDICT_PROPERTIES) & set(properties_types.keys()))
if properties_requiring_cmudict:
if not nltk_download('cmudict', quiet=True):
_warn_if_missing_nltk_dependencies('cmudict', format_list(properties_requiring_cmudict))
for prop in properties_requiring_cmudict:
calculated_properties[prop] = [np.nan] * len(raw_text)
kwargs['cmudict_dict'] = get_cmudict_dict(use_cache=cache_models)
if 'Toxicity' in properties_types and 'toxicity_classifier' not in kwargs:
model_name = TOXICITY_MODEL_NAME_ONNX if use_onnx_models else TOXICITY_MODEL_NAME
kwargs['toxicity_classifier'] = get_transformer_pipeline(
property_name='toxicity', model_name=model_name, device=device,
models_storage=models_storage, use_cache=cache_models, use_onnx_model=use_onnx_models)
if 'Formality' in properties_types and 'formality_classifier' not in kwargs:
model_name = FORMALITY_MODEL_NAME_ONNX if use_onnx_models else FORMALITY_MODEL_NAME
kwargs['formality_classifier'] = get_transformer_pipeline(
property_name='formality', model_name=model_name, device=device,
models_storage=models_storage, use_cache=cache_models, use_onnx_model=use_onnx_models)
if 'Fluency' in properties_types and 'fluency_classifier' not in kwargs:
model_name = FLUENCY_MODEL_NAME_ONNX if use_onnx_models else FLUENCY_MODEL_NAME
kwargs['fluency_classifier'] = get_transformer_pipeline(
property_name='fluency', model_name=model_name, device=device,
models_storage=models_storage, use_cache=cache_models, use_onnx_model=use_onnx_models)
# Remove language property from the list of properties to calculate as it will be calculated separately:
text_properties = [prop for prop in text_properties if prop['name'] != 'Language']
warning_message = (
'Failed to calculate property {0}. '
'Dependencies required by property are not installed. '
'Error:\n{1}'
)
import_warnings = set()
# Calculate all properties for a specific batch than continue to the next batch
for i in tqdm(range(0, len(raw_text), batch_size)):
batch = raw_text[i:i + batch_size]
batch_properties = defaultdict(list)
# filtering out empty sequences
nan_indices = {i for i, seq in enumerate(batch) if pd.isna(seq) is True}
filtered_sequences = [e for i, e in enumerate(batch) if i not in nan_indices]
samples_language = _batch_wrapper(text_batch=filtered_sequences, func=language, **kwargs)
if 'Language' in properties_types:
batch_properties['Language'].extend(samples_language)
calculated_properties['Language'].extend(samples_language)
kwargs['language_property_result'] = samples_language # Pass the language property to other properties
kwargs['batch_size'] = batch_size
non_english_indices = set()
if ignore_non_english_samples_for_english_properties:
non_english_indices = {i for i, (seq, lang) in enumerate(zip(filtered_sequences, samples_language))
if lang != 'en'}
for prop in text_properties:
if prop['name'] in import_warnings: # Skip properties that failed to import:
batch_properties[prop['name']].extend([np.nan] * len(batch))
continue
sequences_to_use = list(filtered_sequences)
if prop['name'] in ENGLISH_ONLY_PROPERTIES and ignore_non_english_samples_for_english_properties:
sequences_to_use = [e for i, e in enumerate(sequences_to_use) if i not in non_english_indices]
try:
if prop['name'] in BATCH_PROPERTIES:
value = run_available_kwargs(text_batch=sequences_to_use, func=prop['method'], **kwargs)
else:
value = _batch_wrapper(text_batch=sequences_to_use, func=prop['method'], **kwargs)
batch_properties[prop['name']].extend(value)
except ImportError as e:
warnings.warn(warning_message.format(prop['name'], str(e)))
batch_properties[prop['name']].extend([np.nan] * len(batch))
import_warnings.add(prop['name'])
continue
# Fill in nan values for samples that were filtered out:
result_index = 0
for index, seq in enumerate(batch):
if index in nan_indices or (index in non_english_indices and
ignore_non_english_samples_for_english_properties and
prop['name'] in ENGLISH_ONLY_PROPERTIES):
calculated_properties[prop['name']].append(np.nan)
else:
calculated_properties[prop['name']].append(batch_properties[prop['name']][result_index])
result_index += 1
# Clear property caches:
textblob_cache.clear()
words_cache.clear()
sentences_cache.clear()
if not calculated_properties:
raise RuntimeError('Failed to calculate any of the properties.')
properties_types = {
k: v
for k, v in properties_types.items()
if k in calculated_properties
}
return calculated_properties, properties_types | Calculate properties on provided text samples. Parameters ---------- raw_text : Sequence[str] The text to calculate the properties for. include_properties : List[str], default None The properties to calculate. If None, all default properties will be calculated. Cannot be used together with ignore_properties parameter. Available properties are: ['Text Length', 'Average Word Length', 'Max Word Length', '% Special Characters', '% Punctuation', 'Language', 'Sentiment', 'Subjectivity', 'Toxicity', 'Fluency', 'Formality', 'Lexical Density', 'Unique Noun Count', 'Reading Ease', 'Average Words Per Sentence', 'URLs Count', Unique URLs Count', 'Email Address Count', 'Unique Email Address Count', 'Unique Syllables Count', 'Reading Time', 'Sentences Count', 'Average Syllable Length'] List of default properties are: ['Text Length', 'Average Word Length', 'Max Word Length', '% Special Characters', '% Punctuation', 'Language', 'Sentiment', 'Subjectivity', 'Toxicity', 'Fluency', 'Formality', 'Lexical Density', 'Unique Noun Count', 'Reading Ease', 'Average Words Per Sentence'] To calculate all the default properties, the include_properties and ignore_properties parameters should be None. If you pass either include_properties or ignore_properties then only the properties specified in the list will be calculated or ignored. Note that the properties ['Toxicity', 'Fluency', 'Formality', 'Language', 'Unique Noun Count'] may take a long time to calculate. If include_long_calculation_properties is False, these properties will be ignored, even if they are in the include_properties parameter. ignore_properties : List[str], default None The properties to ignore from the list of default properties. If None, no properties will be ignored and all the default properties will be calculated. Cannot be used together with include_properties parameter. include_long_calculation_properties : bool, default False Whether to include properties that may take a long time to calculate. If False, these properties will be ignored, unless they are specified in the include_properties parameter explicitly. ignore_non_english_samples_for_english_properties : bool, default True Whether to ignore samples that are not in English when calculating English properties. If False, samples that are not in English will be calculated as well. This parameter is ignored when calculating non-English properties. English-Only properties WILL NOT work properly on non-English samples, and this parameter should be used only when you are sure that all the samples are in English. device : Optional[str], default None The device to use for the calculation. If None, the default device will be used. For onnx based models it is recommended to set device to None for optimized performance. models_storage : Union[str, pathlib.Path, None], default None A directory to store the models. If not provided, models will be stored in `DEEPCHECKS_LIB_PATH/nlp/.nlp-models`. Also, if a folder already contains relevant resources they are not re-downloaded. batch_size : int, default 8 The batch size. cache_models : bool, default False If True, will store the models in device RAM memory. This will speed up the calculation for future calls. use_onnx_models : bool, default True If True, will use onnx gpu optimized models for the calculation. Requires the optimum[onnxruntime-gpu] library to be installed as well as the availability of GPU. 
Returns ------- Dict[str, List[float]] A dictionary with the property name as key and a list of the property values for each text as value. Dict[str, str] A dictionary with the property name as key and the property's type as value. |
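A hedged usage sketch of calculate_builtin_properties; it assumes the fasttext language-identification model can be downloaded (it is always loaded), and the property names are taken from the docstring above. The printed values are indicative only.

texts = ['Deepchecks makes validation easier.', 'Another short example sentence.']
values, types = calculate_builtin_properties(texts, include_properties=['Text Length', 'Lexical Density'])
print(types)                  # expected to map both properties to 'numeric'
print(values['Text Length'])  # one value per input text, NaN wherever a property could not be computed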
490 | import pathlib
import pickle as pkl
import re
import string
import warnings
from collections import defaultdict
from importlib.util import find_spec
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import textblob
import torch.cuda
from nltk import corpus
from nltk import download as nltk_download
from nltk import sent_tokenize, word_tokenize
from tqdm import tqdm
from typing_extensions import TypedDict
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.utils.text import cut_string, hash_text, normalize_text, remove_punctuation
from deepchecks.nlp.utils.text_properties_models import get_cmudict_dict, get_fasttext_model, get_transformer_pipeline
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import SPECIAL_CHARACTERS, format_list
from deepchecks.utils.validation import is_sequence_not_str
ALL_PROPERTIES: Tuple[TextProperty, ...] = \
(
{'name': 'English Text', 'method': english_text, 'output_type': 'categorical'},
{'name': 'URLs Count', 'method': urls_count, 'output_type': 'numeric'},
{'name': 'Email Addresses Count', 'method': email_addresses_count, 'output_type': 'numeric'},
{'name': 'Unique URLs Count', 'method': unique_urls_count, 'output_type': 'numeric'},
{'name': 'Unique Email Addresses Count', 'method': unique_email_addresses_count, 'output_type': 'numeric'},
{'name': 'Unique Syllables Count', 'method': unique_syllables_count, 'output_type': 'numeric'},
{'name': 'Reading Time', 'method': reading_time, 'output_type': 'numeric'},
{'name': 'Sentences Count', 'method': sentences_count, 'output_type': 'numeric'},
{'name': 'Average Syllable Length', 'method': average_syllable_length, 'output_type': 'numeric'},
) + DEFAULT_PROPERTIES
The provided code snippet includes necessary dependencies for implementing the `get_builtin_properties_types` function. Write a Python function `def get_builtin_properties_types()` to solve the following problem:
Get the names of all the available builtin properties. Returns ------- Dict[str, str] A dictionary with the property name as key and the property's type as value.
Here is the function:
def get_builtin_properties_types():
"""
Get the names of all the available builtin properties.
Returns
-------
Dict[str, str]
A dictionary with the property name as key and the property's type as value.
"""
return {
prop['name']: prop['output_type']
for prop in ALL_PROPERTIES
} | Get the names of all the available builtin properties. Returns ------- Dict[str, str] A dictionary with the property name as key and the property's type as value. |
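An illustrative call of the function above; the mapping is built directly from ALL_PROPERTIES.

types = get_builtin_properties_types()
print(types['Reading Time'])  # 'numeric', as declared in ALL_PROPERTIES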
491 | import re
from typing import List, Sequence
import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde
import plotly.express as px
import plotly.graph_objs as go
from deepchecks.nlp import TextData
from deepchecks.nlp.task_type import TaskType
from deepchecks.nlp.utils.text import break_to_lines_and_trim
from deepchecks.nlp.utils.text_properties import TEXT_PROPERTIES_DESCRIPTION
from deepchecks.nlp.utils.token_classification_utils import (annotated_token_classification_text,
count_token_classification_labels)
from deepchecks.utils.dataframes import un_numpy
from deepchecks.utils.distribution.plot import get_density
from deepchecks.utils.plot import DEFAULT_DATASET_NAMES, colors, common_and_outlier_colors
from deepchecks.utils.strings import get_docs_link
def clean_x_axis_non_existent_values(x_axis, distribution):
"""Remove values from x_axis where the distribution has no values."""
# Find the index of the first value in x_axis that is bigger than the value in distribution
ixs = np.searchsorted(sorted(distribution), x_axis, side='left')
# If 2 neighboring indexes are the same, it means that there are no values in the distribution for
# the corresponding value in x_axis. We remove it.
x_axis = [x_axis[i] for i in range(len(ixs)) if ixs[i] != ixs[i - 1]]
return x_axis
def break_to_lines_and_trim(s, max_lines: int = 10, min_line_length: int = 50, max_line_length: int = 60):
"""Break a string to lines and trim it to a maximum number of lines.
Parameters
----------
s : str
The string to break.
max_lines : int, default 10
The maximum number of lines to return.
min_line_length : int, default 50
The minimum length of a line.
max_line_length : int, default 60
The maximum length of a line.
"""
separating_delimiters = [' ', '\t', '\n', '\r']
lines = []
for i in range(max_lines): # pylint: disable=unused-variable
if len(s) < max_line_length: # if remaining string is short enough, add it and break
lines.append(s.strip())
break
else: # find the first delimiter from the end of the line
max_line_length = min(max_line_length, len(s)-1)
for j in range(max_line_length, min_line_length-1, -1):
if s[j] in separating_delimiters:
lines.append(s[:j])
s = s[j:].strip()
break
else: # if no delimiter was found, break in the middle of the line
# Check if breaking in the middle of an HTML tag
tag_start = re.search(r'<[^>]*$', s[:max_line_length])
if tag_start:
max_line_length = tag_start.start()
lines.append(s[:max_line_length].strip() + '-')
s = s[max_line_length:].strip()
else: # if the loop ended without breaking, and there is still text left, add an ellipsis
if len(s) > 0:
lines[-1] = lines[-1] + '...'
return '<br>'.join(lines)
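An illustrative call of the function above; long hover texts are wrapped with '<br>' so plotly tooltips stay readable (the parameter values here are arbitrary).

wrapped = break_to_lines_and_trim('word ' * 40, max_lines=2, min_line_length=10, max_line_length=20)
print(wrapped)  # at most two '<br>'-separated lines, with '...' appended because text was left over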
TEXT_PROPERTIES_DESCRIPTION = {
'Text Length': 'Number of characters in the text',
'Average Word Length': 'Average number of characters in a word',
'Max Word Length': 'Maximum number of characters in a word',
'% Special Characters': 'Percentage of special characters in the text. Special characters are non-alphanumeric '
'unicode characters, excluding whitespaces and any of !\"#$%&\'()*+,-./:;=?\\@.',
'% Punctuation': 'Percentage of punctuation characters in the text. Punctuation characters are any of '
'!\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~',
'Language': 'Language of the text, using the fasttext language detection model',
'Sentiment': 'Sentiment of the text, calculated using the TextBlob sentiment analysis model.'
' Ranging from -1 (negative) to 1 (positive)',
'Subjectivity': 'Subjectivity of the text, calculated using the TextBlob sentiment analysis model. Ranging from 0 '
'(objective) to 1 (subjective)',
'Average Words Per Sentence': 'Average number of words per sentence in the text',
'Reading Ease': 'How easy to read a text sample is, typically ranges from around 0 (hard to read) to around '
'100 (very easy). Based on Flesch reading-ease score',
'Lexical Density': 'Ratio of unique words in the text',
'Toxicity': 'A measure of how harmful or offensive a text sample is (0 to 1), '
'uses the SkolkovoInstitute/roberta_toxicity_classifier model',
'Fluency': 'A measure of the fluency of the text (0 to 1), using the prithivida/parrot_fluency_model'
' model from the authors of the Parrot Paraphraser library',
'Formality': 'The formality / register of the text (0 to 1), using the s-nlp/roberta-base-formality-ranker'
' model by the Skolkovo Institute of Science and Technology',
'Unique Noun Count': 'Number of unique noun words in the text',
'URLs Count': 'Number of URLS per text sample',
'Email Addresses Count': 'Number of email addresses per text sample',
'Unique URLs Count': 'Number of unique URLS per text sample',
'English Text': 'Whether the text is in English (1) or not (0)',
'Unique Email Addresses Count': 'Number of unique email addresses per text sample',
'Unique Syllables Count': 'Number of unique syllables per text sample',
'Reading Time': 'Time taken in seconds to read a text sample',
'Sentences Count': 'Number of sentences per text sample',
'Average Syllable Length': 'Average number of syllables per sentence per text sample',
}
def un_numpy(val):
"""Convert numpy value to native value.
Parameters
----------
val :
The value to convert.
Returns
-------
returns the numpy value in a native type.
"""
if isinstance(val, np.str_):
# NOTE:
# 'np.str_' is instance of the 'np.generic' but
# 'np.isnan(np.str_())' raises an error with a next message:
# >> TypeError: ufunc 'isnan' not supported for the input types...)
#
# therefore this 'if' statement is needed
return val.item()
if isinstance(val, np.generic):
if np.isnan(val):
return None
return val.item()
if isinstance(val, np.ndarray):
return val.tolist()
return val
def get_density(data, xs) -> np.ndarray:
"""Get gaussian kde density to plot.
Parameters
----------
data
The data used to compute the pdf function.
xs : iterable
List of x values to plot the computed pdf for.
Returns
-------
np.array
The computed pdf values at the points xs.
"""
# If there is only a single value, add noise; otherwise gaussian_kde raises a singular matrix error
if len(np.unique(data)) == 1:
data = data + np.random.normal(scale=10 * np.finfo(np.float32).eps, size=len(data))
density = gaussian_kde(data)
density.covariance_factor = lambda: .25
# pylint: disable=protected-access
density._compute_covariance()
return density(xs)
common_and_outlier_colors = {'common': 'rgba(105, 179, 162, 1)',
'outliers': 'rgba(179, 106, 106, 1)',
'common_fill': 'rgba(105, 179, 162, 0.7)',
'outliers_fill': 'rgba(179, 106, 106, 0.7)'}
def get_docs_link():
"""Return the link to the docs with current version.
Returns
-------
str
the link to the docs.
"""
if deepchecks.__version__ and deepchecks.__version__ != 'dev':
version_obj: Version = Version(deepchecks.__version__)
# The version in the docs url is without the hotfix part
version = f'{version_obj.major}.{version_obj.minor}'
else:
version = 'stable'
return f'https://docs.deepchecks.com/{version}/'
The provided code snippet includes necessary dependencies for implementing the `get_text_outliers_graph` function. Write a Python function `def get_text_outliers_graph(dist: Sequence, data: Sequence[str], lower_limit: float, upper_limit: float, dist_name: str, is_categorical: bool)` to solve the following problem:
Create a distribution / bar graph of the data and its outliers. Parameters ---------- dist : Sequence The distribution of the data. data : Sequence[str] The data (used to give samples of it in hover). lower_limit : float The lower limit of the common part of the data (under it is an outlier). upper_limit : float The upper limit of the common part of the data (above it is an outlier). dist_name : str The name of the distribution (feature) is_categorical : bool Whether the data is categorical or not.
Here is the function:
def get_text_outliers_graph(dist: Sequence, data: Sequence[str], lower_limit: float, upper_limit: float, dist_name: str,
is_categorical: bool):
"""Create a distribution / bar graph of the data and its outliers.
Parameters
----------
dist : Sequence
The distribution of the data.
data : Sequence[str]
The data (used to give samples of it in hover).
lower_limit : float
The lower limit of the common part of the data (under it is an outlier).
upper_limit : float
The upper limit of the common part of the data (above it is an outlier).
dist_name : str
The name of the distribution (feature)
is_categorical : bool
Whether the data is categorical or not.
"""
green = common_and_outlier_colors['common']
red = common_and_outlier_colors['outliers']
green_fill = common_and_outlier_colors['common_fill']
red_fill = common_and_outlier_colors['outliers_fill']
if is_categorical:
dist_counts = pd.Series(dist).value_counts(normalize=True).to_dict()
counts = list(dist_counts.values())
categories_list = list(dist_counts.keys())
outliers_first_index = counts.index(lower_limit)
color_discrete_sequence = [green] * outliers_first_index + [red] * (len(counts) - outliers_first_index + 1)
# fixes plotly widget bug with numpy values by converting them to native values
# https://github.com/plotly/plotly.py/issues/3470
cat_df = pd.DataFrame(
{dist_name: counts},
index=[un_numpy(cat) for cat in categories_list]
)
outlier_line_index = 'Outlier<br>Threshold'
cat_df = pd.concat([cat_df.iloc[:outliers_first_index],
pd.DataFrame({dist_name: [None]}, index=[outlier_line_index]),
cat_df.iloc[outliers_first_index:]])
# Get samples and their frequency for the hover data:
tuples = list(zip(dist, data))
tuples.sort(key=lambda x: x[0])
samples_indices = np.searchsorted([x[0] for x in tuples], cat_df.index, side='left')
samples = [tuples[i][1] for i in samples_indices]
samples = [break_to_lines_and_trim(s) for s in samples]
hover_data = np.array([samples, list(cat_df.index), list(cat_df[dist_name])]).T
hover_template = f'<b>{dist_name}</b>: ' \
'%{customdata[1]}<br>' \
'<b>Frequency</b>: %{customdata[2]:.2%}<br>' \
'<b>Sample</b>:<br>"%{customdata[0]}"<br>'
traces = [
go.Bar(
x=cat_df.index,
y=cat_df[dist_name],
marker=dict(color=color_discrete_sequence),
name='Common',
text=[f'{x:.2%}' if x is not None else None for x in cat_df[dist_name]],
customdata=hover_data,
hovertemplate=hover_template
),
go.Bar( # Adding fake bar traces to show the outlier threshold line in the legend
x=[None],
y=[None],
name='Outliers',
marker=dict(color=red),
),
]
yaxis_layout = dict(
fixedrange=True,
autorange=True,
rangemode='normal',
title='Frequency (Log Scale)',
type='log'
)
xaxis_layout = dict(type='category')
else:
dist = dist[~pd.isnull(dist)]
x_range = (
dist.min(), dist.max()
)
if all(int(x) == x for x in dist if x is not None):
# If the distribution is discrete, we take all the values in it:
xs = sorted(np.unique(dist))
if len(xs) > 50:
# If there are too many values, we take only 50, using a constant interval between them:
xs = list(range(int(xs[0]), int(xs[-1]) + 1, int((xs[-1] - xs[0]) // 50)))
else:
# Heuristically take points on x-axis to show on the plot
# The intuition is the graph will look "smooth" wherever we will zoom it
# Also takes mean and median values in order to plot it later accurately
xs = sorted(np.concatenate((
np.linspace(x_range[0], x_range[1], 50),
np.quantile(dist, q=np.arange(0.02, 1, 0.02))
)))
xs = clean_x_axis_non_existent_values(xs, dist)
traces: List[go.BaseTraceType] = []
# In order to plot the common and outliers parts of the graph in different colors, we need to separate them into
# different traces. We do it by creating a mask for each part and then using it to filter the data.
# However, for the graphs to start and end smoothly, we need to add a point in the beginning and end of the
# common part. Those duplicate points will be set to start or end each trace in 0.
all_arr = [1 if lower_limit <= x <= upper_limit else 0 for x in xs]
common_beginning = all_arr.index(1)
common_ending = len(all_arr) - 1 - all_arr[::-1].index(1)
show_lower_outliers = common_beginning != 0
show_upper_outliers = common_ending != len(xs) - 1
total_len = len(xs) + show_lower_outliers + show_upper_outliers
mask_common = np.zeros(total_len, dtype=bool)
mask_outliers_lower = np.zeros(total_len, dtype=bool)
mask_outliers_upper = np.zeros(total_len, dtype=bool)
density = list(get_density(dist, xs))
# If there are lower outliers, add a duplicate point to the beginning of the common part:
if common_beginning != 0:
xs.insert(common_beginning, xs[common_beginning])
density.insert(common_beginning, density[common_beginning])
mask_outliers_lower[:common_beginning + 1] = True
common_ending += 1
# If there are upper outliers, add a duplicate point to the end of the common part:
if common_ending != len(xs) - 1:
xs.insert(common_ending + 1, xs[common_ending])
density.insert(common_ending + 1, density[common_ending])
mask_outliers_upper[common_ending + 1:] = True
mask_common[common_beginning + show_lower_outliers:common_ending + show_upper_outliers] = True
density_common = np.array(density) * mask_common
density_outliers_lower = np.array(density) * mask_outliers_lower
density_outliers_upper = np.array(density) * mask_outliers_upper
# Replace zeros (meaning, non-related values from the mask) with None so that they won't be plotted:
density_common = [x or None for x in density_common]
density_outliers_lower = [x or None for x in density_outliers_lower]
density_outliers_upper = [x or None for x in density_outliers_upper]
# Get samples and their quantiles for the hover data:
tuples = list(zip(dist, data))
tuples.sort(key=lambda x: x[0])
samples_indices = np.searchsorted([x[0] for x in tuples], xs, side='left')
samples = [tuples[i][1] for i in samples_indices]
samples = [break_to_lines_and_trim(s) for s in samples]
quantiles = [100 * i / len(dist) for i in samples_indices]
hover_data = np.array([samples, xs, quantiles]).T
hover_template = f'<b>{dist_name}</b>: ' \
'%{customdata[1]:.2f}<br>' \
'<b>Larger than</b> %{customdata[2]:.2f}% of samples<br>' \
'<b>Sample</b>:<br>"%{customdata[0]}"<br>'
traces.append(go.Scatter(
x=xs, y=density_common, name='Common', fill='tozeroy', fillcolor=green_fill,
line=dict(color=green, shape='linear', width=5), customdata=hover_data, hovertemplate=hover_template,
))
traces.append(go.Scatter(
x=xs, y=density_outliers_lower, name='Lower Outliers', fill='tozeroy', fillcolor=red_fill,
line=dict(color=red, shape='linear', width=5), customdata=hover_data, hovertemplate=hover_template))
traces.append(go.Scatter(
x=xs, y=density_outliers_upper, name='Upper Outliers', fill='tozeroy', fillcolor=red_fill,
line=dict(color=red, shape='linear', width=5), customdata=hover_data, hovertemplate=hover_template))
xaxis_layout = dict(fixedrange=False,
title=dist_name)
yaxis_layout = dict(title='Probability Density', fixedrange=True)
fig = go.Figure(data=traces)
fig.update_xaxes(xaxis_layout)
fig.update_yaxes(yaxis_layout)
if is_categorical: # Add vertical line to separate outliers from common values in bar charts:
fig.add_vline(x=outlier_line_index, line_width=2, line_dash='dash', line_color='black')
if dist_name in TEXT_PROPERTIES_DESCRIPTION:
dist_name = f'{dist_name}<sup><a href="{get_docs_link()}nlp/usage_guides/nlp_properties.html' \
'#deepchecks-built-in-properties">ⓘ</a></sup><br>' \
f'<sup>{TEXT_PROPERTIES_DESCRIPTION[dist_name]}</sup>'
fig.update_layout(
legend=dict(
title='Legend',
yanchor='top',
y=0.6),
height=400,
title=dict(text=dist_name, x=0.5, xanchor='center'),
bargroupgap=0,
hovermode='closest',
hoverdistance=-1)
return fig | Create a distribution / bar graph of the data and its outliers. Parameters ---------- dist : Sequence The distribution of the data. data : Sequence[str] The data (used to give samples of it in hover). lower_limit : float The lower limit of the common part of the data (under it is an outlier). upper_limit : float The upper limit of the common part of the data (above it is an outlier). dist_name : str The name of the distribution (feature) is_categorical : bool Whether the data is categorical or not. |
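A minimal usage sketch for the numeric branch of the function above, using made-up text lengths; the quantile-based limits are illustrative and the function is assumed to be importable from deepchecks' NLP plotting utilities:
import pandas as pd

texts = ['short', 'a medium sized sentence for the example', 'x' * 500]
lengths = pd.Series([len(t) for t in texts])
fig = get_text_outliers_graph(
    dist=lengths,
    data=texts,
    lower_limit=float(lengths.quantile(0.05)),
    upper_limit=float(lengths.quantile(0.95)),
    dist_name='Text Length',
    is_categorical=False,
)
fig.show()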
492 | from typing import List, Sequence
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `call_open_ai_completion_api` function. Write a Python function `def call_open_ai_completion_api(inputs: Sequence[str], max_tokens=200, batch_size=20, # api limit of 20 requests model: str = 'text-davinci-003', temperature: float = 0.5) -> List[str]` to solve the following problem:
Call the open ai completion api with the given inputs batch by batch. Parameters ---------- inputs : Sequence[str] The inputs to send to the api. max_tokens : int, default 200 The maximum number of tokens to return for each input. batch_size : int, default 20 The number of inputs to send in each batch. model : str, default 'text-davinci-003' The model to use for the question answering task. For more information about the models, see: https://beta.openai.com/docs/api-reference/models temperature : float, default 0.5 The temperature to use for the question answering task. For more information about the temperature, see: https://beta.openai.com/docs/api-reference/completions/create-completion Returns ------- List[str] The answers for the questions.
Here is the function:
def call_open_ai_completion_api(inputs: Sequence[str], max_tokens=200, batch_size=20, # api limit of 20 requests
model: str = 'text-davinci-003', temperature: float = 0.5) -> List[str]:
"""
Call the open ai completion api with the given inputs batch by batch.
Parameters
----------
inputs : Sequence[str]
The inputs to send to the api.
max_tokens : int, default 200
The maximum number of tokens to return for each input.
batch_size : int, default 20
The number of inputs to send in each batch.
model : str, default 'text-davinci-003'
The model to use for the question answering task. For more information about the models, see:
https://beta.openai.com/docs/api-reference/models
temperature : float, default 0.5
The temperature to use for the question answering task. For more information about the temperature, see:
https://beta.openai.com/docs/api-reference/completions/create-completion
Returns
-------
List[str]
The answers for the questions.
"""
try:
import openai # pylint: disable=import-outside-toplevel
except ImportError as e:
raise ImportError('question_answering_open_ai requires the openai python package. '
'To get it, run "pip install openai".') from e
from tenacity import retry, stop_after_attempt, wait_random_exponential # pylint: disable=import-outside-toplevel
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(15))
def _get_answers_with_backoff(questions_in_context):
return openai.Completion.create(engine=model, prompt=questions_in_context,
max_tokens=max_tokens, temperature=temperature)
answers = []
for sub_list in tqdm([inputs[x:x + batch_size] for x in range(0, len(inputs), batch_size)],
desc=f'Calculating Responses (Total of {len(inputs)})'):
open_ai_responses = _get_answers_with_backoff(sub_list)
choices = sorted(open_ai_responses['choices'], key=lambda x: x['index'])
answers = answers + [choice['text'] for choice in choices]
return answers | Call the open ai completion api with the given inputs batch by batch. Parameters ---------- inputs : Sequence[str] The inputs to send to the api. max_tokens : int, default 200 The maximum number of tokens to return for each input. batch_size : int, default 20 The number of inputs to send in each batch. model : str, default 'text-davinci-003' The model to use for the question answering task. For more information about the models, see: https://beta.openai.com/docs/api-reference/models temperature : float, default 0.5 The temperature to use for the question answering task. For more information about the temperature, see: https://beta.openai.com/docs/api-reference/completions/create-completion Returns ------- List[str] The answers for the questions. |
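A hypothetical usage sketch for the function above; it assumes the openai package is installed, that an API key has been set (the key below is a placeholder), and that sending real requests is acceptable:
import openai

openai.api_key = 'sk-...'  # placeholder, use your own key
prompts = ['Summarize in one sentence: Deepchecks validates ML data and models.',
           'Translate to French: good morning']
answers = call_open_ai_completion_api(prompts, max_tokens=50, batch_size=2)
for prompt, answer in zip(prompts, answers):
    print(prompt, '->', answer.strip())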
493 | import re
import sys
import warnings
from itertools import islice
from typing import Optional
import numpy as np
from tqdm import tqdm
EMBEDDING_MODEL = 'text-embedding-ada-002'
EMBEDDING_DIM = 1536
EMBEDDING_CTX_LENGTH = 8191
EMBEDDING_ENCODING = 'cl100k_base'
def encode_text(text, encoding_name):
"""Encode tokens with the given encoding."""
# tiktoken.get_encoding is only available in python 3.8 and above.
# This means that for python < 3.8, the batching is just using chunk_length chars each time.
if sys.version_info >= (3, 8):
import tiktoken # pylint: disable=import-outside-toplevel
encoding = tiktoken.get_encoding(encoding_name)
return encoding.encode(text)
else:
return text
def iterate_batched(tokenized_text, chunk_length):
"""Chunk text into tokens of length chunk_length."""
chunks_iterator = batched(tokenized_text, chunk_length)
yield from chunks_iterator
def _clean_special_chars(text: str) -> str:
"""
Remove special characters and replace whitespace and line-break characters with spaces.
Parameters
----------
text : str
The `text` parameter is a string that represents the input text that needs to be cleaned.
Returns
-------
text
Cleaned text string
"""
text = PATTERN_SPECIAL_CHARS.sub('', text)
text = PATTERN_SPACE_CHARS.sub(' ', text)
text = PATTERN_BR_CHARS.sub(' ', text)
return text
The provided code snippet includes necessary dependencies for implementing the `calculate_builtin_embeddings` function. Write a Python function `def calculate_builtin_embeddings(text: np.array, model: str = 'miniLM', file_path: Optional[str] = 'embeddings.npy', device: Optional[str] = None, long_sample_behaviour: str = 'average+warn', open_ai_batch_size: int = 500) -> np.array` to solve the following problem:
Get the built-in embeddings for the dataset. Parameters ---------- text : np.array The text to get embeddings for. model : str, default 'miniLM' The type of embeddings to return. Can be either 'miniLM' or 'open_ai'. For 'open_ai' option, the model used is 'text-embedding-ada-002' and requires to first set an open ai api key by using the command openai.api_key = YOUR_API_KEY file_path : Optional[str], default 'embeddings.csv' If given, the embeddings will be saved to the given file path. device : str, default None The device to use for the embeddings. If None, the default device will be used. long_sample_behaviour : str, default 'average+warn' How to handle long samples. Averaging is done as described in https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb Currently, applies only to the 'open_ai' model, as the 'miniLM' model can handle long samples. Options are: - 'average+warn' (default): average the embeddings of the chunks and warn if the sample is too long. - 'average': average the embeddings of the chunks. - 'truncate': truncate the sample to the maximum length. - 'raise': raise an error if the sample is too long. - 'nan': return an embedding vector of nans for each sample that is too long. open_ai_batch_size : int, default 500 The amount of samples to send to open ai in each batch. Reduce if getting errors from open ai. Returns ------- np.array The embeddings for the dataset.
Here is the function:
def calculate_builtin_embeddings(text: np.array, model: str = 'miniLM',
file_path: Optional[str] = 'embeddings.npy',
device: Optional[str] = None,
long_sample_behaviour: str = 'average+warn',
open_ai_batch_size: int = 500) -> np.array:
"""
Get the built-in embeddings for the dataset.
Parameters
----------
text : np.array
The text to get embeddings for.
model : str, default 'miniLM'
The type of embeddings to return. Can be either 'miniLM' or 'open_ai'.
For 'open_ai' option, the model used is 'text-embedding-ada-002' and requires to first set an open ai api key
by using the command openai.api_key = YOUR_API_KEY
file_path : Optional[str], default 'embeddings.csv'
If given, the embeddings will be saved to the given file path.
device : str, default None
The device to use for the embeddings. If None, the default device will be used.
long_sample_behaviour : str, default 'average+warn'
How to handle long samples. Averaging is done as described in
https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
Currently, applies only to the 'open_ai' model, as the 'miniLM' model can handle long samples.
Options are:
- 'average+warn' (default): average the embeddings of the chunks and warn if the sample is too long.
- 'average': average the embeddings of the chunks.
- 'truncate': truncate the sample to the maximum length.
- 'raise': raise an error if the sample is too long.
- 'nan': return an embedding vector of nans for each sample that is too long.
open_ai_batch_size : int, default 500
The amount of samples to send to open ai in each batch. Reduce if getting errors from open ai.
Returns
-------
np.array
The embeddings for the dataset.
"""
if model == 'miniLM':
try:
import sentence_transformers # pylint: disable=import-outside-toplevel
except ImportError as e:
raise ImportError(
'calculate_builtin_embeddings with model="miniLM" requires the sentence_transformers python package. '
'To get it, run "pip install sentence_transformers".') from e
model = sentence_transformers.SentenceTransformer('all-MiniLM-L6-v2', device=device)
embeddings = model.encode(text)
elif model == 'open_ai':
try:
import openai # pylint: disable=import-outside-toplevel
except ImportError as e:
raise ImportError('calculate_builtin_embeddings with model="open_ai" requires the openai python package. '
'To get it, run "pip install openai".') from e
from tenacity import (retry, retry_if_not_exception_type, # pylint: disable=import-outside-toplevel
stop_after_attempt, wait_random_exponential)
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6),
retry=retry_if_not_exception_type(openai.InvalidRequestError))
def _get_embedding_with_backoff(text_or_tokens, model=EMBEDDING_MODEL):
return openai.Embedding.create(input=text_or_tokens, model=model)['data']
def len_safe_get_embedding(list_of_texts, model_name=EMBEDDING_MODEL, max_tokens=EMBEDDING_CTX_LENGTH,
encoding_name=EMBEDDING_ENCODING):
"""Get embeddings for a list of texts, chunking them if necessary."""
chunked_texts = []
chunk_lens = []
encoded_texts = []
max_sample_length = 0
skip_sample_indices = set()
for i, text_sample in enumerate(list_of_texts):
tokens_in_sample = encode_text(text_sample, encoding_name=encoding_name)
tokens_per_sample = []
num_chunks = 0
for chunk in iterate_batched(tokens_in_sample, chunk_length=max_tokens):
if long_sample_behaviour == 'nan' and num_chunks > 0:
# If nan condition was met, we're going to skip this sample
skip_sample_indices.add(i)
break
# cache the index for each chunk
chunked_texts.append((i, chunk))
chunk_lens.append(len(chunk))
tokens_per_sample += chunk
max_sample_length = max(max_sample_length, len(tokens_per_sample))
num_chunks += 1
if long_sample_behaviour == 'truncate':
break
encoded_texts.append(tokens_per_sample)
if max_sample_length > max_tokens:
if long_sample_behaviour == 'average+warn':
warnings.warn(f'At least one sample is longer than {max_tokens} tokens, which is the maximum '
f'context window handled by {model}. Maximal sample length '
f'found is {max_sample_length} tokens. The sample will be split into chunks and the '
f'embeddings will be averaged. To avoid this warning, set '
f'long_sample_behaviour="average" or long_sample_behaviour="truncate".')
elif long_sample_behaviour == 'raise':
raise ValueError(f'At least one sample is longer than {max_tokens} tokens, which is the maximum '
f'context window handled by {model}. Maximal sample '
f'length found is {max_sample_length} tokens. To avoid this error, set '
f'long_sample_behaviour="average" or long_sample_behaviour="truncate".')
# Filter out the first chunk of samples in skip_sample_indices
filtered_chunked_texts = [chunk for i, chunk in chunked_texts if i not in skip_sample_indices]
chunk_embeddings_output = []
for sub_list in tqdm([filtered_chunked_texts[x:x + open_ai_batch_size]
for x in range(0, len(filtered_chunked_texts), open_ai_batch_size)],
desc='Calculating Embeddings '):
chunk_embeddings_output.extend(_get_embedding_with_backoff(sub_list, model=model_name))
chunk_embeddings = [embedding['embedding'] for embedding in chunk_embeddings_output]
result_embeddings = []
idx = 0
for i, tokens_in_sample in enumerate(encoded_texts):
# If the sample was too long and long_sample_averaging is set to nan, we skip it
# and return a vector of nans. Otherwise, we average the embeddings of the chunks.
# Note that idx only increases if the sample was not skipped, thus keeping us on the same index as
# the filtered chunk_embeddings list.
if i in skip_sample_indices:
text_embedding = np.ones((EMBEDDING_DIM, )) * np.nan
else:
text_embeddings = []
text_lens = []
# while loop to get all chunks for this sample
while idx < len(chunk_lens) and sum(text_lens) < len(tokens_in_sample):
text_embeddings.append(chunk_embeddings[idx])
text_lens.append(chunk_lens[idx])
idx += 1
if sum(text_lens) == 0:
text_embedding = np.ones((EMBEDDING_DIM, )) * np.nan
else:
text_embedding = np.average(text_embeddings, axis=0, weights=text_lens)
text_embedding = text_embedding / np.linalg.norm(text_embedding) # normalizes length to 1
result_embeddings.append(text_embedding.tolist())
return result_embeddings
clean_text = [_clean_special_chars(x) for x in text]
embeddings = len_safe_get_embedding(clean_text)
else:
raise ValueError(f'Unknown model type: {model}')
embeddings = np.array(embeddings).astype(np.float16)
if file_path is not None:
np.save(file_path, embeddings)
return embeddings | Get the built-in embeddings for the dataset. Parameters ---------- text : np.array The text to get embeddings for. model : str, default 'miniLM' The type of embeddings to return. Can be either 'miniLM' or 'open_ai'. For 'open_ai' option, the model used is 'text-embedding-ada-002' and requires to first set an open ai api key by using the command openai.api_key = YOUR_API_KEY file_path : Optional[str], default 'embeddings.csv' If given, the embeddings will be saved to the given file path. device : str, default None The device to use for the embeddings. If None, the default device will be used. long_sample_behaviour : str, default 'average+warn' How to handle long samples. Averaging is done as described in https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb Currently, applies only to the 'open_ai' model, as the 'miniLM' model can handle long samples. Options are: - 'average+warn' (default): average the embeddings of the chunks and warn if the sample is too long. - 'average': average the embeddings of the chunks. - 'truncate': truncate the sample to the maximum length. - 'raise': raise an error if the sample is too long. - 'nan': return an embedding vector of nans for each sample that is too long. open_ai_batch_size : int, default 500 The amount of samples to send to open ai in each batch. Reduce if getting errors from open ai. Returns ------- np.array The embeddings for the dataset. |
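A minimal usage sketch for the default 'miniLM' model; it assumes sentence_transformers is installed, and passes file_path=None so nothing is written to disk:
import numpy as np

texts = np.array(['I love this movie', 'The service was terrible', 'An average experience'])
embeddings = calculate_builtin_embeddings(texts, model='miniLM', file_path=None)
print(embeddings.shape)  # (3, 384) for the all-MiniLM-L6-v2 model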
494 | import re
import string
import typing as t
import unicodedata
import warnings
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
def normalize_text(
text_sample: str,
*,
ignore_case: bool = True,
remove_punct: bool = True,
normalize_uni: bool = True,
remove_stops: bool = True,
ignore_whitespace: bool = False
) -> str:
"""Normalize given text sample."""
if ignore_case:
text_sample = text_sample.lower()
if remove_punct:
text_sample = remove_punctuation(text_sample)
if normalize_uni:
text_sample = normalize_unicode(text_sample)
if remove_stops:
text_sample = remove_stopwords(text_sample)
if ignore_whitespace:
text_sample = ''.join(text_sample.split())
return text_sample
from typing import List
The provided code snippet includes necessary dependencies for implementing the `normalize_samples` function. Write a Python function `def normalize_samples( text_samples: t.Sequence[str], *, ignore_case: bool = True, remove_punct: bool = True, normalize_uni: bool = True, remove_stops: bool = True, ignore_whitespace: bool = False ) -> t.List[str]` to solve the following problem:
Normalize given sequence of text samples.
Here is the function:
def normalize_samples(
text_samples: t.Sequence[str],
*,
ignore_case: bool = True,
remove_punct: bool = True,
normalize_uni: bool = True,
remove_stops: bool = True,
ignore_whitespace: bool = False
) -> t.List[str]:
"""Normalize given sequence of text samples."""
return [
normalize_text(
it,
ignore_case=ignore_case,
remove_punct=remove_punct,
normalize_uni=normalize_uni,
remove_stops=remove_stops,
ignore_whitespace=ignore_whitespace
)
for it in text_samples
] | Normalize given sequence of text samples. |
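A small usage sketch; it assumes the helpers referenced by normalize_text (remove_punctuation, normalize_unicode, remove_stopwords) are available and that the required nltk resources (e.g. stopwords) have been downloaded:
samples = ['The Quick, Brown Fox!', 'Cafés are   open late...']
print(normalize_samples(samples))
print(normalize_samples(samples, remove_stops=False, remove_punct=False))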
495 | import re
import string
import typing as t
import unicodedata
import warnings
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
def hash_text(text: str) -> int:
"""Hash a text sample."""
assert isinstance(text, str)
return hash(text)
from typing import List
The provided code snippet includes necessary dependencies for implementing the `hash_samples` function. Write a Python function `def hash_samples(text: t.Sequence[str]) -> t.List[int]` to solve the following problem:
Hash a sequence of text samples.
Here is the function:
def hash_samples(text: t.Sequence[str]) -> t.List[int]:
"""Hash a sequence of text samples."""
assert not isinstance(text, str)
return [hash_text(it) for it in text] | Hash a sequence of text samples. |
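A small usage sketch; note that Python's built-in hash of strings is salted per process, so the values are stable only within a single run unless PYTHONHASHSEED is fixed:
samples = ['first sample', 'second sample', 'first sample']
hashes = hash_samples(samples)
print(len(hashes) == len(samples))  # True
print(hashes[0] == hashes[2])       # True - identical texts hash identically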
496 | from typing import List, Optional
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from plotly.subplots import make_subplots
from deepchecks.nlp.task_type import TaskType, TTextLabel
from deepchecks.nlp.utils.text import break_to_lines_and_trim
from deepchecks.nlp.utils.text_properties import TEXT_PROPERTIES_DESCRIPTION
from deepchecks.utils.dataframes import un_numpy
from deepchecks.utils.distribution.plot import get_density
from deepchecks.utils.plot import feature_distribution_colors
from deepchecks.utils.strings import format_percent, get_docs_link
def _calculate_annoation_ratio(label, n_samples, is_mutli_label, task_type):
if label is None:
return format_percent(0)
if is_mutli_label or task_type == TaskType.TOKEN_CLASSIFICATION:
annotated_count = _calculate_number_of_annotated_samples(label=label,
is_multi_label=is_mutli_label,
task_type=task_type)
return format_percent(annotated_count / n_samples)
else:
return format_percent(pd.notna(label).sum() / n_samples)
def _generate_table_trace(n_samples, annotation_ratio, categorical_metadata, numerical_metadata,
categorical_properties, numerical_properties):
data_cell = ['<b>Number of samples</b>', '<b>Annotation ratio</b>', '<b>Metadata categorical columns</b>',
'<b>Metadata numerical columns</b>', '<b>Categorical properties</b>', '<b>Numerical properties</b>']
info_cell = _get_table_row_data(n_samples=n_samples, annotation_ratio=annotation_ratio,
categorical_metadata=categorical_metadata, numerical_metadata=numerical_metadata,
categorical_properties=categorical_properties,
numerical_properties=numerical_properties, max_values_to_show=7)
trace = go.Table(header={'fill': {'color': 'white'}},
cells={'values': [data_cell, info_cell], 'align': ['left'], 'font_size': 12,
'height': 30})
return trace
def _generate_categorical_distribution_plot(data, property_name):
dist_counts = data.value_counts(normalize=True).to_dict()
counts = list(dist_counts.values())
categories_list = list(dist_counts.keys())
cat_df = pd.DataFrame({property_name: counts}, index=[un_numpy(cat) for cat in categories_list])
trace = go.Bar(x=cat_df.index, y=cat_df[property_name], showlegend=False,
marker={'color': feature_distribution_colors['feature']},
hovertemplate='<b>Value:</b> %{x}<br><b>Frequency:</b> %{y}<extra></extra>')
yaxis_layout = dict(type='log', title='Frequency (Log Scale)')
xaxis_layout = dict(title=property_name)
return trace, xaxis_layout, yaxis_layout
def _get_distribution_values(data):
mean = data.mean()
median = data.median()
x_range = (data.min(), data.max())
if all(int(x) == x for x in data if x is not None):
# If the distribution is discrete, we take all the values in it:
xs = sorted(np.unique(data))
if len(xs) > 50:
# If there are too many values, we take only 50, using a constant interval between them:
xs = list(range(int(xs[0]), int(xs[-1]) + 1, int((xs[-1] - xs[0]) // 50)))
else:
xs = sorted(np.concatenate((np.linspace(x_range[0], x_range[1], 50),
np.quantile(data, q=np.arange(0.02, 1, 0.02)),
[mean, median]
)))
ixs = np.searchsorted(sorted(data), xs, side='left')
xs = [xs[i] for i in range(len(ixs)) if ixs[i] != ixs[i - 1]]
y_value = get_density(data, xs)
return y_value, xs
def _calculate_number_of_annotated_samples(label, is_multi_label, task_type):
if is_multi_label or task_type == TaskType.TOKEN_CLASSIFICATION:
annotated_count = 0
for label_data in label:
annotated_count = annotated_count + 1 if len(label_data) > 0 and pd.isna(label_data).sum() == 0 \
else annotated_count
return annotated_count
else:
return pd.notna(label).sum()
def _generate_numeric_distribution_plot(data, x_value, y_value, property_name):
mean = data.mean()
percentile_90 = data.quantile(0.9)
percentile_10 = data.quantile(0.1)
median = data.median()
trace = go.Scatter(x=x_value, y=y_value, fill='tozeroy', showlegend=False,
hovertemplate=f'<b>{property_name}:</b> ''%{x}<br><b>Density:</b> %{y}<extra></extra>',
line={'color': feature_distribution_colors['feature'],
'shape': 'linear', 'width': 5})
shapes = []
annotations = []
shapes.append(dict(type='line', x0=mean, y0=0, x1=mean, y1=max(y_value),
line={'color': feature_distribution_colors['measure'], 'dash': 'dash', 'width': 3}))
mean_xpos = mean + max(x_value) * 0.02 if median < mean else mean - max(x_value) * 0.02
annotations.append(dict(x=mean_xpos, y=max(y_value)/2, text='<b>Mean</b>', showarrow=False,
textangle=-90, font={'size': 12}))
shapes.append(dict(type='line', x0=median, y0=0, x1=median, y1=max(y_value),
line={'color': feature_distribution_colors['measure'], 'dash': 'dot', 'width': 3}))
median_xpos = median - max(x_value) * 0.02 if median < mean else median + max(x_value) * 0.02
annotations.append(dict(x=median_xpos, y=max(y_value)/2, text='<b>Median</b>', showarrow=False,
textangle=-90, font={'size': 12}))
shapes.append(dict(type='line', x0=percentile_10, y0=0, x1=percentile_10, y1=max(y_value),
line={'color': feature_distribution_colors['measure'], 'dash': 'dashdot', 'width': 3}))
annotations.append(dict(x=percentile_10 - max(x_value)*0.02, y=max(y_value)/2, textangle=-90,
text='<b>10<sup>th</sup> Percentile</b>', showarrow=False, font={'size': 12}))
shapes.append(dict(type='line', x0=percentile_90, y0=0, x1=percentile_90, y1=max(y_value),
line={'color': feature_distribution_colors['measure'], 'dash': 'dashdot', 'width': 3}))
annotations.append(dict(x=percentile_90 + max(x_value)*0.02, y=max(y_value)/2, textangle=-90,
text='<b>90<sup>th</sup> Percentile</b>', showarrow=False, font={'size': 12}))
xaxis_layout = dict(title=property_name)
yaxis_layout = dict(title='Density')
return trace, shapes, annotations, xaxis_layout, yaxis_layout
TTextLabel = t.Union[TClassLabel, TTokenLabel, TNoneLabel]
class TaskType(Enum):
"""Enum containing supported task types."""
TEXT_CLASSIFICATION = 'text_classification'
TOKEN_CLASSIFICATION = 'token_classification'
OTHER = 'other'
def break_to_lines_and_trim(s, max_lines: int = 10, min_line_length: int = 50, max_line_length: int = 60):
"""Break a string to lines and trim it to a maximum number of lines.
Parameters
----------
s : str
The string to break.
max_lines : int, default 10
The maximum number of lines to return.
min_line_length : int, default 50
The minimum length of a line.
max_line_length : int, default 60
The maximum length of a line.
"""
separating_delimiters = [' ', '\t', '\n', '\r']
lines = []
for i in range(max_lines): # pylint: disable=unused-variable
if len(s) < max_line_length: # if remaining string is short enough, add it and break
lines.append(s.strip())
break
else: # find the first delimiter from the end of the line
max_line_length = min(max_line_length, len(s)-1)
for j in range(max_line_length, min_line_length-1, -1):
if s[j] in separating_delimiters:
lines.append(s[:j])
s = s[j:].strip()
break
else: # if no delimiter was found, break in the middle of the line
# Check if breaking in the middle of an HTML tag
tag_start = re.search(r'<[^>]*$', s[:max_line_length])
if tag_start:
max_line_length = tag_start.start()
lines.append(s[:max_line_length].strip() + '-')
s = s[max_line_length:].strip()
else: # if the loop ended without breaking, and there is still text left, add an ellipsis
if len(s) > 0:
lines[-1] = lines[-1] + '...'
return '<br>'.join(lines)
TEXT_PROPERTIES_DESCRIPTION = {
'Text Length': 'Number of characters in the text',
'Average Word Length': 'Average number of characters in a word',
'Max Word Length': 'Maximum number of characters in a word',
'% Special Characters': 'Percentage of special characters in the text. Special characters are non-alphanumeric '
'unicode characters, excluding whitespaces and any of !\"#$%&\'()*+,-./:;=?\\@.',
'% Punctuation': 'Percentage of punctuation characters in the text. Punctuation characters are any of '
'!\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~',
'Language': 'Language of the text, using the fasttext language detection model',
'Sentiment': 'Sentiment of the text, calculated using the TextBlob sentiment analysis model.'
' Ranging from -1 (negative) to 1 (positive)',
'Subjectivity': 'Subjectivity of the text, calculated using the TextBlob sentiment analysis model. Ranging from 0 '
'(objective) to 1 (subjective)',
'Average Words Per Sentence': 'Average number of words per sentence in the text',
'Reading Ease': 'How easy to read a text sample is, typically ranges from around 0 (hard to read) to around '
'100 (very easy). Based on Flesch reading-ease score',
'Lexical Density': 'Ratio of unique words in the text',
'Toxicity': 'A measure of how harmful or offensive a text sample is (0 to 1), '
'uses the SkolkovoInstitute/roberta_toxicity_classifier model',
'Fluency': 'A measure of the fluency of the text (0 to 1), using the prithivida/parrot_fluency_model'
' model from the authors of the Parrot Paraphraser library',
'Formality': 'The formality / register of the text (0 to 1), using the s-nlp/roberta-base-formality-ranker'
' model by the Skolkovo Institute of Science and Technology',
'Unique Noun Count': 'Number of unique noun words in the text',
'URLs Count': 'Number of URLS per text sample',
'Email Addresses Count': 'Number of email addresses per text sample',
'Unique URLs Count': 'Number of unique URLS per text sample',
'English Text': 'Whether the text is in English (1) or not (0)',
'Unique Email Addresses Count': 'Number of unique email addresses per text sample',
'Unique Syllables Count': 'Number of unique syllables per text sample',
'Reading Time': 'Time taken in seconds to read a text sample',
'Sentences Count': 'Number of sentences per text sample',
'Average Syllable Length': 'Average number of syllables per sentence per text sample',
}
def get_docs_link():
"""Return the link to the docs with current version.
Returns
-------
str
the link to the docs.
"""
if deepchecks.__version__ and deepchecks.__version__ != 'dev':
version_obj: Version = Version(deepchecks.__version__)
# The version in the docs url is without the hotfix part
version = f'{version_obj.major}.{version_obj.minor}'
else:
version = 'stable'
return f'https://docs.deepchecks.com/{version}/'
The provided code snippet includes necessary dependencies for implementing the `text_data_describe_plot` function. Write a Python function `def text_data_describe_plot(n_samples: int, max_num_labels_to_show: int, is_multi_label: bool, task_type: str, properties: pd.DataFrame, categorical_metadata: Optional[List[str]] = None, numerical_metadata: Optional[List[str]] = None, categorical_properties: Optional[List[str]] = None, numerical_properties: Optional[List[str]] = None, model_classes: Optional[List[str]] = None, label: Optional[TTextLabel] = None)` to solve the following problem:
Return a plotly figure instance. Parameters ---------- properties: pd.DataFrame The DataFrame consisting of the text properties data. If no properties are available, you can pass an empty DataFrame as well. n_samples: int The total number of samples present in the TextData object. max_num_labels_to_show : int The threshold to display the maximum number of labels on the label distribution pie chart and display rest of the labels under "Others" category. is_multi_label: bool A boolean where True denotes that the TextData contains multi labeled data otherwise false. task_type: str The task type for the text data. Can be either 'text_classification' or 'token_classification'. categorical_metadata: Optional[List[str]], default: None The names of the categorical metadata columns. numerical_metadata: Optional[List[str]], default: None The names of the numerical metadata columns. categorical_properties: Optional[List[str]], default: None The names of the categorical properties columns. numerical_properties: Optional[List[str]], default: None The names of the numerical text properties columns. label: Optional[TTextLabel], default: None The label for the text data. Can be either a text_classification label or a token_classification label. If None, the label distribution graph is not generated. - text_classification label - For text classification the accepted label format differs between multilabel and single label cases. For single label data, the label should be passed as a sequence of labels, with one entry per sample that can be either a string or an integer. For multilabel data, the label should be passed as a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample. - token_classification label - For token classification the accepted label format is the IOB format or similar to it. The Label must be a sequence of sequences of strings or integers, with each sequence corresponding to a sample in the tokenized text, and exactly the length of the corresponding tokenized text. model_classes: Optional[List[str]], default: None List of classes names to use for multi-label display. Only used if the dataset is multi-label. Returns ------- Plotly Figure instance.
Here is the function:
def text_data_describe_plot(n_samples: int, max_num_labels_to_show: int,
is_multi_label: bool, task_type: str,
properties: pd.DataFrame,
categorical_metadata: Optional[List[str]] = None,
numerical_metadata: Optional[List[str]] = None,
categorical_properties: Optional[List[str]] = None,
numerical_properties: Optional[List[str]] = None,
model_classes: Optional[List[str]] = None,
label: Optional[TTextLabel] = None):
"""Return a plotly figure instance.
Parameters
----------
properties: pd.DataFrame
The DataFrame consisting of the text properties data. If no properties are available, you can pass an
empty DataFrame as well.
n_samples: int
The total number of samples present in the TextData object.
max_num_labels_to_show : int
The threshold to display the maximum number of labels on the label distribution pie chart and display
rest of the labels under "Others" category.
is_multi_label: bool
A boolean where True denotes that the TextData contains multi labeled data otherwise false.
task_type: str
The task type for the text data. Can be either 'text_classification' or 'token_classification'.
categorical_metadata: Optional[List[str]], default: None
The names of the categorical metadata columns.
numerical_metadata: Optional[List[str]], default: None
The names of the numerical metadata columns.
categorical_properties: Optional[List[str]], default: None
The names of the categorical properties columns.
numerical_properties: Optional[List[str]], default: None
The names of the numerical text properties columns.
label: Optional[TTextLabel], default: None
The label for the text data. Can be either a text_classification label or a token_classification label.
If None, the label distribution graph is not generated.
- text_classification label - For text classification the accepted label format differs between multilabel and
single label cases. For single label data, the label should be passed as a sequence of labels, with one entry
per sample that can be either a string or an integer. For multilabel data, the label should be passed as a
sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of
the i-th label in that sample.
- token_classification label - For token classification the accepted label format is the IOB format or similar
to it. The Label must be a sequence of sequences of strings or integers, with each sequence corresponding to
a sample in the tokenized text, and exactly the length of the corresponding tokenized text.
model_classes: Optional[List[str]], default: None
List of classes names to use for multi-label display. Only used if the dataset is multi-label.
Returns
-------
Plotly Figure instance.
"""
specs = [[{'type': 'pie'}, {'type': 'table'}] if label is not None else [{'type': 'table', 'colspan': 2}, None]] + \
[[{'type': 'xy', 'colspan': 2}, None] for _ in range(len(properties.columns))]
subplot_titles = []
if label is not None:
annotated_samples = _calculate_number_of_annotated_samples(label, is_multi_label, task_type)
subplot_titles.append(f'Label Distribution<br><sup>Out of {annotated_samples} annotated samples</sup><br><br>')
subplot_titles.append('') # Empty title for table figure
if not properties.empty:
for prop_name in properties:
if prop_name in TEXT_PROPERTIES_DESCRIPTION:
subplot_titles.append(f'{prop_name} Property Distribution<sup><a href="{get_docs_link()}nlp/'
'usage_guides/nlp_properties.html#deepchecks-built-in-properties">ⓘ</a>'
f'</sup><br><sup>{TEXT_PROPERTIES_DESCRIPTION[prop_name]}</sup>')
fig = make_subplots(rows=len(properties.columns) + 1, cols=2, specs=specs, subplot_titles=subplot_titles,
row_heights=[1.5] + [1.0] * len(properties.columns))
# Create label distribution if label is provided
if label is not None:
if is_multi_label:
df_label = pd.DataFrame(label).fillna(0)
if model_classes is not None:
hashmap = {}
for val in label:
model_array = np.array([model_classes[i] for i, val in enumerate(val) if val == 1])
for class_name in model_array:
hashmap[class_name] = hashmap[class_name] + 1 if class_name in hashmap else 1
label_counts = pd.Series(list(hashmap.values()), index=list(hashmap))
else:
label_counts = pd.Series(np.sum(df_label.to_numpy(), axis=0))
elif task_type == TaskType.TOKEN_CLASSIFICATION:
hashmap = {}
for val in label:
flattened_array = pd.Series(np.array(val).flatten()).fillna('NaN').to_numpy()
unique_values, counts = np.unique(flattened_array, return_counts=True)
for label_value, count in zip(unique_values, counts):
if label_value != 'NaN':
hashmap[label_value] = hashmap[label_value] + count if label_value in hashmap else count
label_counts = pd.Series(list(hashmap.values()), index=list(hashmap))
else:
label_counts = pd.Series(label).value_counts()
label_counts.sort_values(ascending=False, inplace=True)
labels_to_display = label_counts[:max_num_labels_to_show]
labels_to_display.index = [break_to_lines_and_trim(str(label)) for label in list(labels_to_display.index)]
count_other_labels = label_counts[max_num_labels_to_show + 1:].sum()
labels_to_display['Others'] = count_other_labels
# Pie chart for label distribution
fig.add_trace(go.Pie(labels=list(labels_to_display.index), values=list(labels_to_display),
textposition='inside', showlegend=False, textinfo='label+percent',
hovertemplate='%{label}: %{value} samples<extra></extra>'), row=1, col=1)
# Table figure for displaying some statistics
annotation_ratio = _calculate_annoation_ratio(label, n_samples, is_multi_label, task_type)
table_trace = _generate_table_trace(n_samples, annotation_ratio, categorical_metadata, numerical_metadata,
categorical_properties, numerical_properties)
fig.add_trace(table_trace, row=1, col=2 if label is not None else 1)
# Looping over all the properties to generate respective property distribution graphs
curr_row = 2 # Since row 1 is occupied with Pie and Table
for property_name in properties.columns:
if property_name in categorical_properties:
# Creating bar plots for categorical properties
trace, xaxis_layout, yaxis_layout = _generate_categorical_distribution_plot(
properties[property_name], property_name
)
fig.add_trace(trace, row=curr_row, col=1)
fig.update_xaxes(xaxis_layout, row=curr_row, col=1)
fig.update_yaxes(yaxis_layout, row=curr_row, col=1)
else:
# Creating scatter plots for numerical properties
y_value, xs = _get_distribution_values(properties[property_name])
trace, shapes, annotations, xaxis_layout, yaxis_layout = _generate_numeric_distribution_plot(
properties[property_name],
xs, y_value, property_name
)
fig.add_trace(trace, row=curr_row, col=1)
for shape, annotation in zip(shapes, annotations):
fig.add_shape(shape, row=curr_row, col=1)
fig.add_annotation(annotation, row=curr_row, col=1)
fig.update_yaxes(yaxis_layout, row=curr_row, col=1)
fig.update_xaxes(xaxis_layout, row=curr_row, col=1)
curr_row += 1
fig.update_layout(height=450*(len(properties.columns) + 1))
return fig | Return a plotly figure instance. Parameters ---------- properties: pd.DataFrame The DataFrame consisting of the text properties data. If no prooperties are there, you can pass an empty DataFrame as well. n_samples: int The total number of samples present in the TextData object. max_num_labels_to_show : int The threshold to display the maximum number of labels on the label distribution pie chart and display rest of the labels under "Others" category. is_multi_label: bool A boolean where True denotes that the TextData contains multi labeled data otherwise false. task_type: str The task type for the text data. Can be either 'text_classification' or 'token_classification'. categorical_metadata: Optional[List[str]], default: None The names of the categorical metadata columns. numerical_metadata: Optional[List[str]], default: None The names of the numerical metadata columns. categorical_properties: Optional[List[str]], default: None The names of the categorical properties columns. numerical_properties: Optional[List[str]], default: None The names of the numerical text properties columns. label: Optional[TTextLabel], default: None The label for the text data. Can be either a text_classification label or a token_classification label. If None, the label distribution graph is not generated. - text_classification label - For text classification the accepted label format differs between multilabel and single label cases. For single label data, the label should be passed as a sequence of labels, with one entry per sample that can be either a string or an integer. For multilabel data, the label should be passed as a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample. - token_classification label - For token classification the accepted label format is the IOB format or similar to it. The Label must be a sequence of sequences of strings or integers, with each sequence corresponding to a sample in the tokenized text, and exactly the length of the corresponding tokenized text. model_classes: Optional[List[str]], default: None List of classes names to use for multi-label display. Only used if the dataset is multi-label. Returns ------- Plotly Figure instance. |
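A minimal usage sketch with a toy single-label classification setup; the property values are made up, empty metadata lists are passed explicitly, and the function is assumed to be importable from deepchecks' NLP plotting utilities:
import pandas as pd

labels = ['pos', 'neg', 'pos', 'neg', 'pos']
props = pd.DataFrame({'Text Length': [12, 40, 33, 7, 25], 'Language': ['en'] * 5})
fig = text_data_describe_plot(
    n_samples=5,
    max_num_labels_to_show=10,
    is_multi_label=False,
    task_type='text_classification',
    properties=props,
    categorical_metadata=[],
    numerical_metadata=[],
    categorical_properties=['Language'],
    numerical_properties=['Text Length'],
    label=labels,
)
fig.show()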
497 | import warnings
from typing import List, Tuple
import numpy as np
import pandas as pd
from seqeval.metrics.sequence_labeling import get_entities
from sklearn.base import BaseEstimator
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.nlp.task_type import TaskType
class DeepchecksValueError(DeepchecksBaseError):
"""Exception class that represent a fault parameter was passed to Deepchecks."""
pass
class TaskType(Enum):
"""Enum containing supported task types."""
TEXT_CLASSIFICATION = 'text_classification'
TOKEN_CLASSIFICATION = 'token_classification'
OTHER = 'other'
The provided code snippet includes necessary dependencies for implementing the `infer_observed_and_model_labels` function. Write a Python function `def infer_observed_and_model_labels( train_dataset=None, test_dataset=None, model: BaseEstimator = None, y_pred_train: np.ndarray = None, y_pred_test: np.ndarray = None, model_classes: list = None, task_type: TaskType = None ) -> Tuple[List, List]` to solve the following problem:
Infer the observed labels from the given datasets and predictions. Parameters ---------- train_dataset : Union[TextData, None], default None TextData object, representing data an estimator was fitted on test_dataset : Union[TextData, None], default None TextData object, representing data an estimator predicts on model : Union[BaseEstimator, None], default None A fitted estimator instance y_pred_train : np.array Predictions on train_dataset y_pred_test : np.array Predictions on test_dataset model_classes : Optional[List], default None list of classes known to the model task_type : Union[TaskType, None], default None The task type of the model Returns ------- observed_classes : list List of observed label values. For multi-label, returns number of observed labels. model_classes : list List of the user-given model classes. For multi-label, if not given by the user, returns a range of len(label)
Here is the function:
def infer_observed_and_model_labels(
train_dataset=None,
test_dataset=None,
model: BaseEstimator = None,
y_pred_train: np.ndarray = None,
y_pred_test: np.ndarray = None,
model_classes: list = None,
task_type: TaskType = None
) -> Tuple[List, List]:
"""
Infer the observed labels from the given datasets and predictions.
Parameters
----------
train_dataset : Union[TextData, None], default None
TextData object, representing data an estimator was fitted on
test_dataset : Union[TextData, None], default None
TextData object, representing data an estimator predicts on
model : Union[BaseEstimator, None], default None
A fitted estimator instance
y_pred_train : np.array
Predictions on train_dataset
y_pred_test : np.array
Predictions on test_dataset
model_classes : Optional[List], default None
list of classes known to the model
task_type : Union[TaskType, None], default None
The task type of the model
Returns
-------
observed_classes : list
List of observed label values. For multi-label, returns number of observed labels.
model_classes : list
List of the user-given model classes. For multi-label, if not given by the user, returns a range of
len(label)
"""
# TODO: Doesn't work for predictions
train_labels = []
test_labels = []
have_model = model is not None # Currently irrelevant as no model is given in NLP
if train_dataset:
if train_dataset.has_label():
train_labels += list(train_dataset.label)
if have_model:
train_labels += list(model.predict(train_dataset))
if test_dataset:
if test_dataset.has_label():
test_labels += list(test_dataset.label)
if have_model:
test_labels += list(model.predict(test_dataset))
if task_type == TaskType.TOKEN_CLASSIFICATION:
# Flatten:
train_labels = [token_label for sentence in train_labels for token_label in sentence]
test_labels = [token_label for sentence in test_labels for token_label in sentence]
if model_classes and 'O' in model_classes:
model_classes = [c for c in model_classes if c != 'O']
warnings.warn(
'"O" label was removed from model_classes as it is ignored by metrics for token classification',
UserWarning)
observed_classes = np.array(test_labels + train_labels, dtype=object)
if len(observed_classes.shape) == 2: # For the multi-label case
len_observed_label = observed_classes.shape[1]
if not model_classes:
model_classes = list(range(len_observed_label))
observed_classes = list(range(len_observed_label))
else:
if len(model_classes) != len_observed_label:
raise DeepchecksValueError(f'Received model_classes of length {len(model_classes)}, '
f'but data indicates labels of length {len_observed_label}')
observed_classes = model_classes
else:
observed_classes = observed_classes[~pd.isnull(observed_classes)]
observed_classes = sorted(np.unique(observed_classes))
if task_type == TaskType.TOKEN_CLASSIFICATION:
observed_classes = [c for c in observed_classes if c != 'O']
observed_classes = sorted({tag for tag, _, _ in get_entities(observed_classes)})
return observed_classes, model_classes | Infer the observed labels from the given datasets and predictions. Parameters ---------- train_dataset : Union[TextData, None], default None TextData object, representing data an estimator was fitted on test_dataset : Union[TextData, None], default None TextData object, representing data an estimator predicts on model : Union[BaseEstimator, None], default None A fitted estimator instance y_pred_train : np.array Predictions on train_dataset y_pred_test : np.array Predictions on test_dataset model_classes : Optional[List], default None list of classes known to the model task_type : Union[TaskType, None], default None The task type of the model Returns ------- observed_classes : list List of observed label values. For multi-label, returns number of observed labels. model_classes : list List of the user-given model classes. For multi-label, if not given by the user, returns a range of len(label) |
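A hedged usage sketch of the function above: a duck-typed stand-in replaces TextData here, since only the `has_label()` method and `label` attribute are touched on this path, and the label values are invented for illustration.
# Stand-in object exposing just the attributes the function reads; not a real TextData.
class _FakeTextData:
    def __init__(self, label):
        self.label = label
    def has_label(self):
        return True

train = _FakeTextData(label=['spam', 'ham', 'ham'])
test = _FakeTextData(label=['ham', 'other'])
observed, model_classes = infer_observed_and_model_labels(train_dataset=train, test_dataset=test)
# observed == ['ham', 'other', 'spam']; model_classes remains None because it was not provided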
498 | import warnings
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from numba import NumbaDeprecationWarning
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from umap import UMAP
from deepchecks.core.check_utils.multivariate_drift_utils import auc_to_drift_score, build_drift_plot
from deepchecks.nlp import TextData
from deepchecks.nlp.utils.nlp_plot import two_datasets_scatter_plot
from deepchecks.utils.distribution.drift import drift_score_bar_traces  # assumed location of the bar-trace helper used below
SAMPLES_FOR_REDUCTION_FIT = 1000
def display_embeddings(train_dataset: TextData, test_dataset: TextData, random_state: int, model_classes: list):
"""Display the embeddings with the domain classifier proba as the x-axis and the embeddings as the y-axis."""
embeddings = np.concatenate([train_dataset.embeddings, test_dataset.embeddings])
reducer = UMAP(n_components=2, n_neighbors=5, init='random', min_dist=1, random_state=random_state)
reduced_embeddings = reducer.fit_transform(embeddings)
x_axis_title = 'Reduced Embedding (0)'
y_axis_title = 'Reduced Embedding (1)'
plot_data = pd.DataFrame({x_axis_title: reduced_embeddings[:, 0],
y_axis_title: reduced_embeddings[:, 1]})
plot_title = 'Scatter Plot of Embeddings Space (reduced to 2 dimensions)'
return two_datasets_scatter_plot(plot_title=plot_title, plot_data=plot_data, train_dataset=train_dataset,
test_dataset=test_dataset, model_classes=model_classes)
def auc_to_drift_score(auc: float) -> float:
"""Calculate the drift score, which is 2*auc - 1, with auc being the auc of the Domain Classifier.
Parameters
----------
auc : float
auc of the Domain Classifier
"""
return max(2 * auc - 1, 0)
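A few worked values for the mapping above, since the flooring at 0 is easy to miss:
auc_to_drift_score(0.5)   # 0.0  - the domain classifier is no better than chance, no drift
auc_to_drift_score(0.75)  # 0.5  - moderate separation between train and test
auc_to_drift_score(1.0)   # 1.0  - perfect separation, maximal drift
auc_to_drift_score(0.4)   # 0.0  - AUC below 0.5 is clamped to 0 by the max()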
def build_drift_plot(score):
"""Build traffic light drift plot."""
bar_traces, x_axis, y_axis = drift_score_bar_traces(score)
x_axis['title'] = 'Drift score'
drift_plot = go.Figure(layout=dict(
title='Drift Score - Multivariable',
xaxis=x_axis,
yaxis=y_axis,
height=200
))
drift_plot.add_traces(bar_traces)
return drift_plot
The provided code snippet includes necessary dependencies for implementing the `run_multivariable_drift_for_embeddings` function. Write a Python function `def run_multivariable_drift_for_embeddings(train_dataset: TextData, test_dataset: TextData, sample_size: int, random_state: int, test_size: float, num_samples_in_display: int, dimension_reduction_method: str, model_classes: list, with_display: bool)` to solve the following problem:
Calculate multivariable drift on embeddings.
Here is the function:
def run_multivariable_drift_for_embeddings(train_dataset: TextData, test_dataset: TextData,
sample_size: int, random_state: int, test_size: float,
num_samples_in_display: int, dimension_reduction_method: str,
model_classes: list, with_display: bool):
"""Calculate multivariable drift on embeddings."""
np.random.seed(random_state)
# sample train and test datasets equally
train_sample = train_dataset.sample(sample_size, random_state=random_state)
test_sample = test_dataset.sample(sample_size, random_state=random_state)
train_sample_embeddings = train_sample.embeddings
test_sample_embeddings = test_sample.embeddings
# create new dataset, with label denoting whether sample belongs to test dataset
domain_class_array = np.concatenate([train_sample_embeddings, test_sample_embeddings])
domain_class_labels = pd.Series([0] * len(train_sample_embeddings) + [1] * len(test_sample_embeddings))
# reduce dimensionality of embeddings if needed.
# skips if not required ('none') or if number of features is small enough (< 30) in 'auto' mode.
use_reduction = not (dimension_reduction_method == 'none' or (
dimension_reduction_method == 'auto' and domain_class_array.shape[1] < 30))
use_umap = dimension_reduction_method == 'umap' or (dimension_reduction_method == 'auto' and with_display)
if use_reduction:
if use_umap:
reducer = UMAP(n_components=10, n_neighbors=5, init='random',
random_state=np.random.RandomState(random_state))
        else:  # PCA is faster, but the resulting graph will look worse.
reducer = PCA(n_components=10, random_state=random_state)
samples_for_reducer = min(SAMPLES_FOR_REDUCTION_FIT, len(domain_class_array))
samples = np.random.choice(len(domain_class_array), samples_for_reducer, replace=False)
reducer.fit(domain_class_array[samples])
domain_class_array = reducer.transform(domain_class_array)
# update train and test samples with new reduced embeddings (used later in display)
new_embeddings_train = domain_class_array[:len(train_sample_embeddings)]
new_embeddings_test = domain_class_array[len(train_sample_embeddings):]
train_sample.set_embeddings(new_embeddings_train, verbose=False)
test_sample.set_embeddings(new_embeddings_test, verbose=False)
x_train, x_test, y_train, y_test = train_test_split(domain_class_array, domain_class_labels,
stratify=domain_class_labels, random_state=random_state,
test_size=test_size)
    # train a model to distinguish between train and test samples
domain_classifier = GradientBoostingClassifier(max_depth=2, random_state=random_state)
domain_classifier.fit(x_train, y_train)
y_pred = domain_classifier.predict_proba(x_test)[:, 1]
domain_classifier_auc = roc_auc_score(y_test, y_pred)
drift_score = auc_to_drift_score(domain_classifier_auc)
values_dict = {'domain_classifier_auc': domain_classifier_auc, 'domain_classifier_drift_score': drift_score}
if with_display:
relevant_index_train = list(y_test[y_test == 0].index)
relevant_index_test = [x - len(train_sample_embeddings) for x in y_test[y_test == 1].index]
train_sample = train_sample.copy(rows_to_use=relevant_index_train)
test_sample = test_sample.copy(rows_to_use=relevant_index_test)
# Sample data before display calculations
num_samples_in_display_train = min(int(num_samples_in_display/2), sample_size, len(train_sample))
train_dataset_for_display = train_sample.sample(num_samples_in_display_train, random_state=random_state)
num_samples_in_display_test = min(int(num_samples_in_display/2), sample_size, len(test_sample))
test_dataset_for_display = test_sample.sample(num_samples_in_display_test, random_state=random_state)
displays = [build_drift_plot(drift_score),
display_embeddings(train_dataset=train_dataset_for_display,
test_dataset=test_dataset_for_display,
random_state=random_state,
model_classes=model_classes)]
else:
displays = None
return values_dict, displays | Calculate multivariable drift on embeddings. |
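A minimal standalone sketch of the domain-classifier idea used above, run on synthetic embeddings instead of TextData objects; the array shapes and the 0.5 mean shift are made up purely to induce detectable drift.
# Synthetic "train" and "test" embeddings; the shift in the test set is the injected drift.
rng = np.random.default_rng(0)
train_emb = rng.normal(loc=0.0, scale=1.0, size=(500, 16))
test_emb = rng.normal(loc=0.5, scale=1.0, size=(500, 16))
X = np.concatenate([train_emb, test_emb])
y = pd.Series([0] * len(train_emb) + [1] * len(test_emb))  # 0 = train, 1 = test
x_tr, x_te, y_tr, y_te = train_test_split(X, y, stratify=y, random_state=0, test_size=0.3)
clf = GradientBoostingClassifier(max_depth=2, random_state=0).fit(x_tr, y_tr)
auc = roc_auc_score(y_te, clf.predict_proba(x_te)[:, 1])
print(auc_to_drift_score(auc))  # noticeably above 0 because the two samples differ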
499 | import io
import traceback
import typing as t
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union, cast
from typing_extensions import Literal
import jsonpickle
import jsonpickle.ext.pandas as jsonpickle_pd
import pandas as pd
from ipywidgets import Widget
from pandas.io.formats.style import Styler
from plotly.basedatatypes import BaseFigure
from deepchecks.core.condition import ConditionCategory, ConditionResult
from deepchecks.core.display import DisplayableResult, save_as_html
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.core.reduce_classes import ReduceMixin
from deepchecks.core.serialization.abc import HTMLFormatter
from deepchecks.core.serialization.check_failure.html import CheckFailureSerializer as CheckFailureHtmlSerializer
from deepchecks.core.serialization.check_failure.ipython import CheckFailureSerializer as CheckFailureIPythonSerializer
from deepchecks.core.serialization.check_failure.json import CheckFailureSerializer as CheckFailureJsonSerializer
from deepchecks.core.serialization.check_failure.widget import CheckFailureSerializer as CheckFailureWidgetSerializer
from deepchecks.core.serialization.check_result.html import CheckResultSection
from deepchecks.core.serialization.check_result.html import CheckResultSerializer as CheckResultHtmlSerializer
from deepchecks.core.serialization.check_result.ipython import CheckResultSerializer as CheckResultIPythonSerializer
from deepchecks.core.serialization.check_result.json import CheckResultSerializer as CheckResultJsonSerializer
from deepchecks.core.serialization.check_result.widget import CheckResultSerializer as CheckResultWidgetSerializer
from deepchecks.utils.strings import widget_to_html_string
from deepchecks.utils.wandb_utils import wandb_run
CheckResultSection = t.Union[
Literal['condition-table'],
Literal['additional-output'],
]
def detalize_additional_output(show_additional_outputs: bool) -> List[CheckResultSection]:
    """Return the check result sections to render, optionally including the additional output."""
    return (
        ['condition-table', 'additional-output']
        if show_additional_outputs
        else ['condition-table']
    )
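Example return values (shown as comments):
detalize_additional_output(True)   # ['condition-table', 'additional-output']
detalize_additional_output(False)  # ['condition-table']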
500 | import abc
import io
import json
import pathlib
import warnings
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Type, Union, cast
import jsonpickle
from bs4 import BeautifulSoup
from ipywidgets import Widget
from typing_extensions import Self, TypedDict
from deepchecks import __version__
from deepchecks.core import check_result as check_types
from deepchecks.core.checks import BaseCheck, CheckConfig
from deepchecks.core.display import DisplayableResult, save_as_html
from deepchecks.core.errors import DeepchecksNotSupportedError, DeepchecksValueError
from deepchecks.core.serialization.abc import HTMLFormatter
from deepchecks.core.serialization.suite_result.html import SuiteResultSerializer as SuiteResultHtmlSerializer
from deepchecks.core.serialization.suite_result.ipython import SuiteResultSerializer as SuiteResultIPythonSerializer
from deepchecks.core.serialization.suite_result.json import SuiteResultSerializer as SuiteResultJsonSerializer
from deepchecks.core.serialization.suite_result.widget import SuiteResultSerializer as SuiteResultWidgetSerializer
from deepchecks.utils.strings import get_random_string, widget_to_html_string
from deepchecks.utils.wandb_utils import wandb_run
from . import common
The provided code snippet includes necessary dependencies for implementing the `sort_check_results` function. Write a Python function `def sort_check_results( check_results: Sequence['check_types.BaseCheckResult'] ) -> List['check_types.BaseCheckResult']` to solve the following problem:
Sort sequence of 'CheckResult' instances. Returns ------- List[check_types.CheckResult]
Here is the function:
def sort_check_results(
check_results: Sequence['check_types.BaseCheckResult']
) -> List['check_types.BaseCheckResult']:
"""Sort sequence of 'CheckResult' instances.
Returns
-------
List[check_types.CheckResult]
"""
order = []
check_results_index = {}
for index, it in enumerate(check_results):
check_results_index[index] = it
if isinstance(it, check_types.CheckResult):
order.append((it.priority, index))
        elif isinstance(it, check_types.CheckFailure):
            order.append((998, index))  # failures are placed after every successful result
        else:
            order.append((999, index))  # anything unrecognized goes last
    order = sorted(order)  # stable by (priority, original position)
return [
check_results_index[index]
for _, index in order
] | Sort sequence of 'CheckResult' instances. Returns ------- List[check_types.CheckResult] |
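A small illustration of the fallback ordering: objects that are neither CheckResult nor CheckFailure all land in the last (999) bucket, and the index tie-breaker preserves their original order. Real CheckResult objects would instead be ordered by their own `priority` value.
sort_check_results(['first', 'second', 'third'])  # -> ['first', 'second', 'third']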
501 | import enum
import typing as t
from decimal import Decimal
import numpy as np
import pandas as pd
from deepchecks.core import ConditionResult
from deepchecks.core.condition import ConditionCategory
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.utils.dict_funcs import get_dict_entry_by_value
from deepchecks.utils.strings import format_number, format_percent
class ConditionCategory(enum.Enum):
"""Condition result category. indicates whether the result should fail the suite."""
FAIL = 'FAIL'
WARN = 'WARN'
PASS = 'PASS'
ERROR = 'ERROR'
def format_number(x, floating_point: int = 2) -> str:
"""Format number for elegant display.
Parameters
----------
x
Number to be displayed
floating_point : int , default: 2
Number of floating points to display
Returns
-------
str
String of beautified number
"""
def add_commas(x):
return f'{x:,}' # yes this actually formats the number 1000 to "1,000"
if np.isnan(x):
return 'nan'
# 0 is lost in the next if case, so we have it here as a special use-case
if x == 0:
return '0'
# If x is a very small number, that would be rounded to 0, we would prefer to return it as the format 1.0E-3.
if abs(x) < 10 ** (-floating_point):
return f'{Decimal(x):.{floating_point}E}'
# If x is an integer, or if x when rounded is an integer (e.g. 1.999999), then return as integer:
if round(x) == round(x, floating_point):
return add_commas(round(x))
# If not, return as a float, but don't print unnecessary zeros at end:
else:
ret_x = round(x, floating_point)
return add_commas(ret_x).rstrip('0')
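A few sample outputs of the helper above, one per branch:
format_number(0)          # '0'
format_number(2.000001)   # '2'        - rounds to an integer at 2 decimal places
format_number(1234.5678)  # '1,234.57' - thousands separator added
format_number(0.0001)     # '1.00E-4'  - very small values switch to scientific notation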
The provided code snippet includes necessary dependencies for implementing the `get_condition_test_performance_greater_than` function. Write a Python function `def get_condition_test_performance_greater_than(min_score: float) -> t.Callable[[pd.DataFrame], ConditionResult]` to solve the following problem:
Add condition - test metric scores are greater than the threshold. Parameters ---------- min_score : float Minimum score to pass the check. Returns ------- Callable the condition function
Here is the function:
def get_condition_test_performance_greater_than(min_score: float) -> \
t.Callable[[pd.DataFrame], ConditionResult]:
"""Add condition - test metric scores are greater than the threshold.
Parameters
----------
min_score : float
Minimum score to pass the check.
Returns
-------
Callable
the condition function
"""
def condition(check_result: pd.DataFrame):
test_scores = check_result.loc[check_result['Dataset'] == 'Test']
not_passed_test = test_scores.loc[test_scores['Value'] <= min_score]
is_passed = len(not_passed_test) == 0
has_classes = check_result.get('Class') is not None
details = ''
if not is_passed:
details += f'Found {len(not_passed_test)} scores below threshold.\n'
min_metric = test_scores.loc[test_scores['Value'].idxmin()]
details += f'Found minimum score for {min_metric["Metric"]} metric of value ' \
f'{format_number(min_metric["Value"])}'
if has_classes:
details += f' for class {min_metric.get("Class Name", min_metric["Class"])}.'
return ConditionResult(ConditionCategory.PASS if is_passed else ConditionCategory.FAIL, details)
return condition | Add condition - test metric scores are greater than the threshold. Parameters ---------- min_score : float Minimum score to pass the check. Returns ------- Callable the condition function |
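A hedged usage sketch with a made-up scores table; the column names ('Dataset', 'Metric', 'Value') are the ones the condition reads, and the numeric values are invented.
# Toy scores table for illustration only.
scores = pd.DataFrame({
    'Dataset': ['Train', 'Test', 'Test'],
    'Metric': ['F1', 'F1', 'Precision'],
    'Value': [0.95, 0.72, 0.64],
})
condition = get_condition_test_performance_greater_than(0.7)
result = condition(scores)
# result is a ConditionResult with category FAIL, because the Test Precision score
# of 0.64 does not exceed the 0.7 threshold; the details string names that metric.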