import os
from fastapi import FastAPI, Query
from pydantic import BaseModel
from typing import List
from huggingface_hub import InferenceClient
from deep_translator import GoogleTranslator
from sse_starlette.sse import EventSourceResponse
from dotenv import load_dotenv, find_dotenv

_ = load_dotenv(find_dotenv()) # read local .env file
hf_api_key = os.environ['HF_TOKEN']

app = FastAPI()

# Initialize the Hugging Face InferenceClient for the Mixtral instruct model
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1", token=hf_api_key)

# Optional module-level translators (currently created per request in /translate):
# translator_to_en = GoogleTranslator(source='vietnamese', target='english')
# translator_to_vi = GoogleTranslator(source='english', target='vietnamese')

class PromptRequest(BaseModel):
    message: str
    history: List[List[str]]

class GenerateResponse(BaseModel):
    output: str

def format_prompt(message, history):
    # Build a Mixtral instruction prompt: each past turn is wrapped in
    # [INST] ... [/INST] followed by the bot response, then the new message is appended.
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
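
# Illustrative example of the prompt this produces (hypothetical inputs):
#   history = [["Hi", "Hello! How can I help?"]], message = "Tell me a joke"
#   -> "<s>[INST] Hi [/INST] Hello! How can I help?</s> [INST] Tell me a joke [/INST]"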

def generate_responses(response_stream):
    # Yield the text of each streamed token so it can be sent as SSE events.
    for response in response_stream:
        yield response.token.text

@app.post("/generate")
async def generate(prompt_request: PromptRequest, 
             temperature: float = Query(0.9, ge=0.0, le=1.0),
             max_new_tokens: int = Query(256, ge=0, le=1048),
             top_p: float = Query(0.90, ge=0.0, le=1.0),
             repetition_penalty: float = Query(1.2, ge=1.0, le=2.0),
             stream: bool = Query(False, description="Set to True to return response stream, False to return full text")):
    formatted_prompt = format_prompt(prompt_request.message, prompt_request.history)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    if stream:
        response_stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
        return EventSourceResponse(generate_responses(response_stream), media_type="text/event-stream") # media_type="application/x-ndjson"
    else:
        response = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=True, return_full_text=False)
        return response.generated_text
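
# Illustrative request (hypothetical host/port; adjust to your deployment):
#   curl -X POST "http://localhost:8000/generate?temperature=0.7&max_new_tokens=128" \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Hello!", "history": []}'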

@app.post("/translate")
def translate(text: str, source: str, target: str):
    if source == target:
        return {"translated_text": text}

    translator = GoogleTranslator(source=source, target=target)
    translated_text = translator.translate(text)
    return {"translated_text": translated_text}