from fastapi import FastAPI
from pydantic import BaseModel
from huggingface_hub import InferenceClient

app = FastAPI()
# Client for the Hugging Face Inference API, pointed at Mistral 7B Instruct.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")


class ChatRequest(BaseModel):
    prompt: str
    history: list  # list of [user_message, bot_response] pairs


def format_prompt(message, history):
    """Wrap the system prompt, prior turns, and the new message in Mistral's [INST] tags."""
    system_prompt = "You are Mistral, a gentle and useful AI assistant."
    prompt = f"[INST] {system_prompt} [/INST]"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response} "
    prompt += f"[INST] {message} [/INST]"
    return prompt


def generate(prompt, history, temperature=0.2, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    temperature = float(temperature)
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)
    # text_generation returns the generated string directly unless details=True
    # is passed, so the result is returned as-is (the original's
    # response.generated_text would raise AttributeError on a str).
    response = client.text_generation(formatted_prompt, **generate_kwargs)
    return response


@app.post("/generate/")
async def chat(request: ChatRequest):
    return {"response": generate(request.prompt, request.history)}
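

# Usage sketch (an assumption, not part of the original): with this file saved
# as main.py (a hypothetical filename), the service could be started and
# queried like so:
#
#   uvicorn main:app --port 8000
#
#   curl -X POST http://127.0.0.1:8000/generate/ \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "What is the capital of France?", "history": []}'
#
# The endpoint responds with a JSON object of the form
# {"response": "<model output>"}.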