import argparse
import os
import sys
from pathlib import Path
from typing import Optional, Union

import markdown2
import uvicorn
from fastapi import Depends, FastAPI, HTTPException
from fastapi.responses import HTMLResponse
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from pydantic import BaseModel, Field
from sentence_transformers import SentenceTransformer
from sse_starlette.sse import EventSourceResponse, ServerSentEvent
from tclogger import logger

from constants.envs import CONFIG, SECRETS
from constants.models import AVAILABLE_MODELS_DICTS, PRO_MODELS
from messagers.message_composer import MessageComposer
from mocks.stream_chat_mocker import stream_chat_mock
from networks.exceptions import HfApiException, INVALID_API_KEY_ERROR
from networks.huggingface_streamer import HuggingfaceStreamer
from networks.huggingchat_streamer import HuggingchatStreamer
from networks.openai_streamer import OpenaiStreamer
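

# ChatAPIApp wires a FastAPI app that serves OpenAI-style chat completion
# routes, a sentence-transformers embeddings route, and the rendered README.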
class ChatAPIApp:
    def __init__(self):
        self.app = FastAPI(
            docs_url="/",
            title=CONFIG["app_name"],
            swagger_ui_parameters={"defaultModelsExpandDepth": -1},
            version=CONFIG["version"],
        )
        self.setup_routes()
        # Preload the sentence-transformers models served by the embeddings route
        self.embeddings = {
            "mxbai-embed-large": SentenceTransformer(
                "mixedbread-ai/mxbai-embed-large-v1"
            ),
            "nomic-embed-text": SentenceTransformer(
                "nomic-ai/nomic-embed-text-v1.5", trust_remote_code=True
            ),
        }

    def get_available_models(self):
        return {"object": "list", "data": AVAILABLE_MODELS_DICTS}

    def extract_api_key(
        credentials: HTTPAuthorizationCredentials = Depends(HTTPBearer(auto_error=False)),
    ):
        # auto_error=False lets requests without an Authorization header through,
        # so auth_api_key can decide whether a key is actually required
        api_key = None
        if credentials:
            api_key = credentials.credentials
        return api_key

    def auth_api_key(self, api_key: str):
        env_api_key = SECRETS["HF_LLM_API_KEY"]
        # server does not require an api_key
        if not env_api_key:
            return None
        # user provides HF_TOKEN
        if api_key and api_key.startswith("hf_"):
            return api_key
        # user provides correct API_KEY
        if str(api_key) == str(env_api_key):
            return None
        raise INVALID_API_KEY_ERROR

    class ChatCompletionsPostItem(BaseModel):
        model: str = Field(
            default="nous-mixtral-8x7b",
            description="(str) `nous-mixtral-8x7b`",
        )
        messages: list = Field(
            default=[{"role": "user", "content": "Hello, who are you?"}],
            description="(list) Messages",
        )
        temperature: Union[float, None] = Field(
            default=0.5,
            description="(float) Temperature",
        )
        top_p: Union[float, None] = Field(
            default=0.95,
            description="(float) Top p",
        )
        max_tokens: Union[int, None] = Field(
            default=-1,
            description="(int) Max tokens",
        )
        use_cache: bool = Field(
            default=False,
            description="(bool) Use cache",
        )
        stream: bool = Field(
            default=True,
            description="(bool) Stream",
        )

    def chat_completions(
        self, item: ChatCompletionsPostItem, api_key: str = Depends(extract_api_key)
    ):
        try:
            api_key = self.auth_api_key(api_key)

            # Pick the streamer that serves the requested model
            if item.model == "gpt-3.5-turbo":
                streamer = OpenaiStreamer()
                stream_response = streamer.chat_response(messages=item.messages)
            elif item.model in PRO_MODELS:
                streamer = HuggingchatStreamer(model=item.model)
                stream_response = streamer.chat_response(
                    messages=item.messages,
                )
            else:
                streamer = HuggingfaceStreamer(model=item.model)
                composer = MessageComposer(model=item.model)
                composer.merge(messages=item.messages)
                stream_response = streamer.chat_response(
                    prompt=composer.merged_str,
                    temperature=item.temperature,
                    top_p=item.top_p,
                    max_new_tokens=item.max_tokens,
                    api_key=api_key,
                    use_cache=item.use_cache,
                )

            if item.stream:
                event_source_response = EventSourceResponse(
                    streamer.chat_return_generator(stream_response),
                    media_type="text/event-stream",
                    ping=2000,
                    ping_message_factory=lambda: ServerSentEvent(**{"comment": ""}),
                )
                return event_source_response
            else:
                data_response = streamer.chat_return_dict(stream_response)
                return data_response
        except HfApiException as e:
            raise HTTPException(status_code=e.status_code, detail=e.detail)
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    class EmbeddingRequest(BaseModel):
        model: str
        prompt: str
        options: Optional[dict] = None

    def get_embeddings(
        self, request: EmbeddingRequest, api_key: str = Depends(extract_api_key)
    ):
        try:
            model = request.model
            # NOTE: request.options is accepted but not yet forwarded to encode()
            model_kwargs = request.options
            embeddings = self.embeddings[model].encode(
                request.prompt, convert_to_tensor=True
            )  # , **model_kwargs)
            return {"embedding": embeddings.tolist()}
        except ValueError as e:
            raise HTTPException(status_code=400, detail=str(e))

    def get_readme(self):
        readme_path = Path(__file__).parents[1] / "README.md"
        with open(readme_path, "r", encoding="utf-8") as rf:
            readme_str = rf.read()
        readme_html = markdown2.markdown(
            readme_str, extras=["table", "fenced-code-blocks", "highlightjs-lang"]
        )
        return readme_html

    def setup_routes(self):
        # Register the same endpoints under several prefixes;
        # only the /api/v1 variants are listed in the OpenAPI docs.
        for prefix in ["", "/v1", "/api", "/api/v1"]:
            if prefix in ["/api/v1"]:
                include_in_schema = True
            else:
                include_in_schema = False

            self.app.get(
                prefix + "/models",
                summary="Get available models",
                include_in_schema=include_in_schema,
            )(self.get_available_models)

            self.app.post(
                prefix + "/chat/completions",
                summary="Chat completions in conversation session",
                include_in_schema=include_in_schema,
            )(self.chat_completions)

            self.app.post(
                prefix + "/generate",
                summary="Chat completions in conversation session",
                include_in_schema=include_in_schema,
            )(self.chat_completions)

            self.app.post(
                prefix + "/chat",
                summary="Chat completions in conversation session",
                include_in_schema=include_in_schema,
            )(self.chat_completions)

            self.app.post(
                prefix + "/embeddings",
                summary="Get embeddings for a prompt",
                include_in_schema=include_in_schema,
            )(self.get_embeddings)

        self.app.get(
            "/readme",
            summary="README of HF LLM API",
            response_class=HTMLResponse,
            include_in_schema=False,
        )(self.get_readme)


class ArgParser(argparse.ArgumentParser):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.add_argument(
            "-s",
            "--host",
            type=str,
            default=CONFIG["host"],
            help=f"Host for {CONFIG['app_name']}",
        )
        self.add_argument(
            "-p",
            "--port",
            type=int,
            default=CONFIG["port"],
            help=f"Port for {CONFIG['app_name']}",
        )
        self.add_argument(
            "-d",
            "--dev",
            default=False,
            action="store_true",
            help="Run in dev mode",
        )
        self.args = self.parse_args(sys.argv[1:])


app = ChatAPIApp().app

if __name__ == "__main__":
    args = ArgParser().args
    if args.dev:
        uvicorn.run("__main__:app", host=args.host, port=args.port, reload=True)
    else:
        uvicorn.run("__main__:app", host=args.host, port=args.port, reload=False)

# python -m apis.chat_api # [Docker] on product mode
# python -m apis.chat_api -d # [Dev] on develop mode
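# Example requests (the host/port below are placeholders; use the values from
# CONFIG["host"] / CONFIG["port"] or the -s / -p CLI options, and send the
# Authorization header only if HF_LLM_API_KEY is set):
#   curl -X POST http://127.0.0.1:23333/api/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -H "Authorization: Bearer $HF_LLM_API_KEY" \
#     -d '{"model": "nous-mixtral-8x7b", "messages": [{"role": "user", "content": "Hello, who are you?"}], "stream": true}'
#   curl -X POST http://127.0.0.1:23333/api/v1/embeddings \
#     -H "Content-Type: application/json" \
#     -d '{"model": "nomic-embed-text", "prompt": "Hello, who are you?"}'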