# LLMServer/main/utils/validation.py
from typing import Dict, Any
from pathlib import Path
from dotenv import load_dotenv
from huggingface_hub import login
import os
def validate_model_path(model_path: Path) -> bool:
    """Return True if *model_path* points to a directory holding a loadable model.

    A valid model directory must contain ``config.json`` plus at least one
    supported weight file: ``pytorch_model.bin`` (legacy PyTorch pickle) or
    ``model.safetensors`` (the default format for recently saved checkpoints).

    Args:
        model_path: Local directory expected to contain the model files.

    Returns:
        True when the path exists and the required files are present.
    """
    if not model_path.exists():
        return False
    # The config is always required regardless of weight format.
    if not (model_path / 'config.json').exists():
        return False
    # Accept either serialization format; newer checkpoints often ship
    # only safetensors, so requiring pytorch_model.bin alone would
    # wrongly reject them.
    weight_files = ('pytorch_model.bin', 'model.safetensors')
    return any((model_path / name).exists() for name in weight_files)
def validate_generation_params(params: Dict[str, Any]) -> Dict[str, Any]:
    """Return a copy of *params* with generation settings clamped to safe ranges.

    Keys that are present are normalized in the returned copy; the input
    dict is never mutated and unknown keys pass through untouched.

    Args:
        params: Raw generation parameters supplied by the caller.

    Returns:
        A new dict where ``temperature`` lies in [0.0, 2.0] and
        ``max_new_tokens`` lies in [1, 4096].
    """
    normalized = dict(params)

    # Clamp temperature into the supported sampling range.
    if 'temperature' in normalized:
        normalized['temperature'] = min(2.0, max(0.0, normalized['temperature']))

    # Keep the requested token budget positive and bounded.
    if 'max_new_tokens' in normalized:
        normalized['max_new_tokens'] = min(4096, max(1, normalized['max_new_tokens']))

    return normalized
def validate_hf(setup_logger, config):
    """
    Validate Hugging Face authentication.

    Loads environment variables from a local ``.env`` file when one exists,
    then attempts a Hugging Face login with the ``HF_TOKEN`` environment
    variable.

    Args:
        setup_logger: Factory callable producing a logger from (config, name).
        config: Configuration object forwarded to *setup_logger*.

    Returns:
        True on successful authentication, False when the token is missing
        or the login attempt fails.
    """
    log = setup_logger(config, "hf_validation")

    # Pull variables from a local .env file when running outside HF Spaces.
    if Path('.env').exists():
        log.info("Found .env file, loading environment variables")
        load_dotenv()
    else:
        log.warning("No .env file found. Fine if you're on Huggingface, but you need one to run locally on your PC.")

    token = os.environ.get('HF_TOKEN')
    if not token:
        log.error("No HF_TOKEN found in environment variables")
        return False

    try:
        login(token=token)
    except Exception as e:
        log.error(f"Failed to authenticate with Hugging Face: {str(e)}")
        return False

    log.info("Successfully authenticated with Hugging Face")
    return True