import os
import sys

import torch
from dotenv import find_dotenv, load_dotenv

# Locate the project's .env file, falling back to .env.example if none exists.
found_dotenv = find_dotenv(".env")

if len(found_dotenv) == 0:
    found_dotenv = find_dotenv(".env.example")
print(f"loading env vars from: {found_dotenv}")
load_dotenv(found_dotenv, override=False)

# Make the project root importable so the llm_toolkit package can be found.
path = os.path.dirname(found_dotenv)
print(f"Adding {path} to sys.path")
sys.path.append(path)

from llm_toolkit.translation_engine import *
from llm_toolkit.translation_utils import *

# Read the evaluation configuration from environment variables.
model_name = os.getenv("MODEL_NAME")
adapter_name_or_path = os.getenv("ADAPTER_NAME_OR_PATH")
load_in_4bit = os.getenv("LOAD_IN_4BIT") == "true"
data_path = os.getenv("DATA_PATH")
results_path = os.getenv("RESULTS_PATH")

print(model_name, adapter_name_or_path, load_in_4bit, data_path, results_path)

# (1) GPU memory before loading the model.
gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"(1) GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")

# Load the base model (optionally in 4-bit) and attach the adapter if one is given.
model, tokenizer = load_model(
    model_name, load_in_4bit=load_in_4bit, adapter_name_or_path=adapter_name_or_path
)

# (2) GPU memory after loading the model.
gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"(2) GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")

datasets = load_translation_dataset(data_path, tokenizer)

print("Evaluating model: " + model_name)
predictions = eval_model(model, tokenizer, datasets["test"])

# (3) GPU memory after inference.
gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"(3) GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")

# Record the adapter checkpoint in the reported model name so results stay distinguishable.
if adapter_name_or_path is not None:
    model_name += "_" + adapter_name_or_path.split("/")[-1]

save_results(
    model_name,
    results_path,
    datasets["test"],
    predictions,
    debug=True,
)

metrics = calc_metrics(datasets["test"]["english"], predictions, debug=True)
print(metrics)
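
# A minimal sketch of the environment this script expects, assuming the variables
# read above drive load_model / load_translation_dataset / save_results as shown.
# All values below are hypothetical placeholders, not defaults shipped with the repo:
#
#   MODEL_NAME=your-org/your-base-model
#   ADAPTER_NAME_OR_PATH=path/to/lora/checkpoint   # optional; unset to evaluate the base model
#   LOAD_IN_4BIT=true                              # any other value loads the model in full precision
#   DATA_PATH=path/to/translation-dataset
#   RESULTS_PATH=path/to/results.csv
#
# With these set in .env (or exported in the shell), the evaluation is run as:
#   python path/to/this_script.py   # adjust to wherever this file lives in the repo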