import os

import aiohttp
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Read once at import time. NOTE(review): if unset, the Authorization header
# becomes "Bearer None" and the API will reject requests — consider failing
# fast at startup.
HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")


def load_model(model_name):
    """Load a seq2seq model and its tokenizer locally from the Hugging Face hub.

    Args:
        model_name: Hub model identifier (e.g. "google/flan-t5-base").

    Returns:
        A ``(tokenizer, model)`` tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    return tokenizer, model


async def process_text(model_name, text):
    """Extract key products, geographies, and keywords from a company
    description using the hosted Hugging Face Inference API.

    Args:
        model_name: Hub model identifier used to build the inference URL.
        text: Company description to analyze.

    Returns:
        The generated text from the API (stripped), or ``str(result)`` when
        the response has an unexpected shape (e.g. an error payload).
    """
    # BUG FIX: the original called load_model(model_name) here, downloading
    # the full model weights and tokenizer locally on every call — and then
    # never used them, since inference happens remotely below. The wasteful
    # local load has been removed; load_model() remains available for callers
    # that actually run the model locally.
    prompt = f"Given the following company description, extract key products, geographies, and important keywords:\n\n{text}\n\nProducts, geographies, and keywords:"

    async with aiohttp.ClientSession() as session:
        async with session.post(
            f"https://api-inference.huggingface.co/models/{model_name}",
            headers={"Authorization": f"Bearer {HUGGINGFACE_API_KEY}"},
            json={"inputs": prompt},
        ) as response:
            result = await response.json()

    # The API returns a list of generations on success; error payloads are
    # typically dicts (e.g. {"error": ...}), which fall through to str().
    if isinstance(result, list) and result:
        return result[0].get('generated_text', '').strip()
    if isinstance(result, dict):
        return result.get('generated_text', '').strip()
    return str(result)