# LLMServer/main/test_locally.py
def test_locally(load_config, setup_logger, LLMApi):
    """Run local tests for development and debugging."""
    config = load_config()
    logger = setup_logger(config, "test")
    logger.info("Starting local tests")

    api = LLMApi(config)
    model_name = config["model"]["defaults"]["model_name"]
    logger.info(f"Testing with model: {model_name}")
    # Test download
    logger.info("Testing model download...")
    api.download_model(model_name)
    logger.info("Download complete")

    # Test initialization
    logger.info("Initializing model...")
    api.initialize_model(model_name)
    logger.info("Model initialized")
    # Test embedding (Norwegian sample text: "This is a test of embeddings
    # generation from a technical text about HSE routines in the workplace.")
    test_text = "Dette er en test av embeddings generering fra en teknisk tekst om HMS rutiner på arbeidsplassen."
    logger.info("Testing embedding generation...")
    embedding = api.generate_embedding(test_text)
    logger.info(f"Generated embedding of length: {len(embedding)}")
    logger.info(f"First few values: {embedding[:5]}")
    # Test generation
    test_prompts = [
        "Tell me what happens in a nuclear reactor.",
    ]

    # Test regular (non-streaming) generation
    logger.info("Testing regular generation:")
    for prompt in test_prompts:
        logger.info(f"Prompt: {prompt}")
        response = api.generate_response(
            prompt=prompt,
            system_message="You are a helpful assistant."
        )
        logger.info(f"Response: {response}")
    # Test streaming generation: chunks are printed as they arrive rather
    # than logged, so partial output is visible immediately.
    logger.info("Testing streaming generation:")
    logger.info(f"Prompt: {test_prompts[0]}")
    for chunk in api.generate_stream(
        prompt=test_prompts[0],
        system_message="You are a helpful assistant."
    ):
        print(chunk, end="", flush=True)
    print("\n")

    logger.info("Local tests completed")