import json
import os
import sys
import numpy as np
import pandas as pd
import pytest
import yaml
from transformers.testing_utils import execute_subprocess_async
from llm_studio.app_utils.default_datasets import (
prepare_default_dataset_causal_language_modeling,
)


def get_experiment_status(path: str) -> str:
    """Get status information from experiment.

    Args:
        path: path to experiment folder

    Returns:
        experiment status
    """
try:
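        # the experiment's status is persisted in flags.json in its output folder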
flag_json_path = os.path.join(path, "flags.json")
if not os.path.exists(flag_json_path):
return "none"
with open(flag_json_path) as file:
flags = json.load(file)
status = flags.get("status", "none")
return status
except Exception:
return "none"


@pytest.mark.parametrize(
"config_name",
[
"test_causal_language_modeling_oasst_cfg",
"test_sequence_to_sequence_modeling_oasst_cfg",
],
)
@pytest.mark.parametrize(
"metric",
[
"Perplexity",
"BLEU",
],
)
def test_oasst_training_gpu(tmp_path, config_name, metric):
run_oasst(tmp_path, config_name, metric)


@pytest.mark.parametrize(
"settings",
[
["AUC", "test_causal_binary_classification_modeling_cfg"],
["LogLoss", "test_causal_multiclass_classification_modeling_cfg"],
],
)
def test_oasst_classification_training_gpu(tmp_path, settings):
metric, config_name = settings
run_oasst(
tmp_path,
config_name=config_name,
metric=metric,
)


@pytest.mark.parametrize(
"settings",
[
["AUC", "test_causal_binary_classification_modeling_cpu_cfg"],
["LogLoss", "test_causal_multiclass_classification_modeling_cpu_cfg"],
],
)
def test_oasst_classification_training_cpu(tmp_path, settings):
metric, config_name = settings
run_oasst(
tmp_path,
config_name=config_name,
metric=metric,
)


@pytest.mark.parametrize(
"config_name",
[
"test_causal_language_modeling_oasst_cpu_cfg",
"test_sequence_to_sequence_modeling_oasst_cpu_cfg",
],
)
@pytest.mark.parametrize(
"metric",
[
"Perplexity",
"BLEU",
],
)
def test_oasst_training_cpu(tmp_path, config_name, metric):
run_oasst(tmp_path, config_name, metric)


def run_oasst(tmp_path, config_name, metric):
"""
Test training on OASST dataset.
Pytest keeps around the last 3 test runs in the tmp_path fixture.
"""
prepare_default_dataset_causal_language_modeling(tmp_path)
train_path = os.path.join(tmp_path, "train_full.pq")
# create dummy labels for classification problem type,
# unused for other problem types
df = pd.read_parquet(train_path)
df["multiclass_label"] = np.random.choice(["0", "1", "2"], size=len(df))
df["binary_label"] = np.random.choice(["0", "1"], size=len(df))
df.to_parquet(train_path)
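
    # load the experiment config that ships next to this test file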
with open(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), f"{config_name}.yaml"
),
"r",
) as fp:
cfg = yaml.load(fp, Loader=yaml.FullLoader)
# set paths and save in tmp folder
cfg["dataset"]["train_dataframe"] = train_path
cfg["output_directory"] = os.path.join(tmp_path, "output")
# set metric
cfg["prediction"]["metric"] = metric
    modified_config_path = os.path.join(tmp_path, "cfg.yaml")
    with open(modified_config_path, "w") as fp:
        yaml.dump(cfg, fp)
# llm studio root directory.
root_dir = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../")
)
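
    # launch training as a separate process via train.py with the modified config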
    cmd = [
        sys.executable,
        os.path.join(root_dir, "train.py"),
        "-Y",
        modified_config_path,
    ]
execute_subprocess_async(cmd)
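
    # training must have finished and produced the expected artifacts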
assert os.path.exists(cfg["output_directory"])
status = get_experiment_status(path=cfg["output_directory"])
assert status == "finished"
assert os.path.exists(os.path.join(cfg["output_directory"], "charts.db"))
assert os.path.exists(os.path.join(cfg["output_directory"], "checkpoint.pth"))
assert os.path.exists(os.path.join(cfg["output_directory"], "logs.log"))
assert os.path.exists(
os.path.join(cfg["output_directory"], "validation_predictions.csv")
)