ruslanmv committed · Commit 6ec7105
First commit
Files changed:
- .github/workflows/sync_to_hf_space.yml  +18 -0
- .gitignore  +2 -0
- Dockerfile  +7 -0
- README.md  +164 -0
- __init__.py  +0 -0
- apis/__init__.py  +0 -0
- apis/chat_api.py  +214 -0
- examples/__init__.py  +0 -0
- examples/chat_with_openai.py  +25 -0
- examples/chat_with_post.py  +55 -0
- messagers/__init__.py  +0 -0
- messagers/message_composer.py  +247 -0
- messagers/message_outputer.py  +65 -0
- mocks/__init__.py  +0 -0
- mocks/stream_chat_mocker.py  +13 -0
- networks/__init__.py  +0 -0
- networks/message_streamer.py  +212 -0
- requirements.txt  +12 -0
- utils/__init__.py  +69 -0
- utils/enver.py  +60 -0
- utils/logger.py  +269 -0
.github/workflows/sync_to_hf_space.yml
ADDED
@@ -0,0 +1,18 @@
name: Sync to Hugging Face hub
on:
  push:
    branches: [main]
  workflow_dispatch:

jobs:
  sync-to-hub:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          lfs: true
      - name: Push to hub
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: git push -f https://ruslanmv:$HF_TOKEN@huggingface.co/spaces/ruslanmv/hf-llm-api-collection main
.gitignore
ADDED
@@ -0,0 +1,2 @@
secrets.json
__pycache__
Dockerfile
ADDED
@@ -0,0 +1,7 @@
FROM python:3.11-slim
WORKDIR $HOME/app
COPY . .
RUN pip install -r requirements.txt
VOLUME /data
EXPOSE 23333
CMD ["python", "-m", "apis.chat_api"]
README.md
ADDED
@@ -0,0 +1,164 @@
---
title: HF LLM API COLLECTION
emoji: ☯️
colorFrom: gray
colorTo: gray
sdk: docker
app_port: 23333
---

## HF-LLM-API

Hugging Face LLM Inference API in OpenAI message format.

Project link: https://github.com/ruslanmv/hf-llm-api-collection

## Features

- Available Models (2024/01/22): [#5](https://github.com/Hansimov/hf-llm-api/issues/5)
  - `mistral-7b`, `mixtral-8x7b`, `nous-mixtral-8x7b`
- Adaptive prompt templates for different models
- Supports the OpenAI API format
  - Enables API endpoints via the official `openai-python` package
- Supports both streaming and non-streaming responses
- Supports API keys via both the HTTP auth header and an env variable [#4](https://github.com/Hansimov/hf-llm-api/issues/4)
- Docker deployment

## Run API service

### Run in Command Line

**Install dependencies:**

```bash
# pipreqs . --force --mode no-pin
pip install -r requirements.txt
```

**Run API:**

```bash
python -m apis.chat_api
```

## Run via Docker

**Docker build:**

```bash
sudo docker build -t hf-llm-api:1.0 . --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy
```

**Docker run:**

```bash
# no proxy
sudo docker run -p 23333:23333 hf-llm-api:1.0

# with proxy
sudo docker run -p 23333:23333 --env http_proxy="http://<server>:<port>" hf-llm-api:1.0
```

## API Usage

### Using `openai-python`

See: [`examples/chat_with_openai.py`](https://github.com/ruslanmv/hf-llm-api-collection/blob/main/examples/chat_with_openai.py)

```py
from openai import OpenAI

# If running this service behind a proxy, you might need to unset `http(s)_proxy`.
base_url = "http://127.0.0.1:23333"
# Your own HF_TOKEN
api_key = "hf_xxxxxxxxxxxxxxxx"
# use below as a non-auth user
# api_key = "sk-xxx"

client = OpenAI(base_url=base_url, api_key=api_key)
response = client.chat.completions.create(
    model="mixtral-8x7b",
    messages=[
        {
            "role": "user",
            "content": "what is your model",
        }
    ],
    stream=True,
)

for chunk in response:
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="", flush=True)
    elif chunk.choices[0].finish_reason == "stop":
        print()
    else:
        pass
```

### Using post requests

See: [`examples/chat_with_post.py`](https://github.com/ruslanmv/hf-llm-api-collection/blob/main/examples/chat_with_post.py)

```py
import ast
import httpx
import json
import re

# If running this service behind a proxy, you might need to unset `http(s)_proxy`.
chat_api = "http://127.0.0.1:23333"
# Your own HF_TOKEN
api_key = "hf_xxxxxxxxxxxxxxxx"
# use below as a non-auth user
# api_key = "sk-xxx"

requests_headers = {}
requests_payload = {
    "model": "mixtral-8x7b",
    "messages": [
        {
            "role": "user",
            "content": "what is your model",
        }
    ],
    "stream": True,
}

with httpx.stream(
    "POST",
    chat_api + "/chat/completions",
    headers=requests_headers,
    json=requests_payload,
    timeout=httpx.Timeout(connect=20, read=60, write=20, pool=None),
) as response:
    # https://docs.aiohttp.org/en/stable/streams.html
    # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb
    response_content = ""
    for line in response.iter_lines():
        remove_patterns = [r"^\s*data:\s*", r"^\s*\[DONE\]\s*"]
        for pattern in remove_patterns:
            line = re.sub(pattern, "", line).strip()

        if line:
            try:
                line_data = json.loads(line)
            except Exception as e:
                try:
                    line_data = ast.literal_eval(line)
                except:
                    print(f"Error: {line}")
                    raise e
            # print(f"line: {line_data}")
            delta_data = line_data["choices"][0]["delta"]
            finish_reason = line_data["choices"][0]["finish_reason"]
            if "role" in delta_data:
                role = delta_data["role"]
            if "content" in delta_data:
                delta_content = delta_data["content"]
                response_content += delta_content
                print(delta_content, end="", flush=True)
            if finish_reason == "stop":
                print()
```
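The features above also mention a non-streaming mode; a minimal sketch of it, assuming the same local endpoint as the README examples (the reply is returned in one JSON body built by `chat_return_dict`):

```py
import httpx

chat_api = "http://127.0.0.1:23333"
payload = {
    "model": "mixtral-8x7b",
    "messages": [{"role": "user", "content": "what is your model"}],
    "stream": False,
}
# With "stream": False the server answers with a single OpenAI-style dict,
# so the full reply can be read from choices[0]["message"]["content"].
response = httpx.post(chat_api + "/chat/completions", json=payload, timeout=60)
data = response.json()
print(data["choices"][0]["message"]["content"])
```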
__init__.py
ADDED
File without changes
apis/__init__.py
ADDED
File without changes
apis/chat_api.py
ADDED
@@ -0,0 +1,214 @@
import argparse
import markdown2
import os
import sys
import uvicorn

from pathlib import Path
from fastapi import FastAPI, Depends
from fastapi.responses import HTMLResponse
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from pydantic import BaseModel, Field
from typing import Union
from sse_starlette.sse import EventSourceResponse, ServerSentEvent
from utils.logger import logger
from networks.message_streamer import MessageStreamer
from messagers.message_composer import MessageComposer
from mocks.stream_chat_mocker import stream_chat_mock


class ChatAPIApp:
    def __init__(self):
        self.app = FastAPI(
            docs_url="/",
            title="HuggingFace LLM API",
            swagger_ui_parameters={"defaultModelsExpandDepth": -1},
            version="1.0",
        )
        self.setup_routes()

    def get_available_models(self):
        # https://platform.openai.com/docs/api-reference/models/list
        # ANCHOR[id=available-models]: Available models
        self.available_models = {
            "object": "list",
            "data": [
                {
                    "id": "mixtral-8x7b",
                    "description": "[mistralai/Mixtral-8x7B-Instruct-v0.1]: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1",
                    "object": "model",
                    "created": 1700000000,
                    "owned_by": "mistralai",
                },
                {
                    "id": "mistral-7b",
                    "description": "[mistralai/Mistral-7B-Instruct-v0.2]: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
                    "object": "model",
                    "created": 1700000000,
                    "owned_by": "mistralai",
                },
                {
                    "id": "nous-mixtral-8x7b",
                    "description": "[NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO]: https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
                    "object": "model",
                    "created": 1700000000,
                    "owned_by": "NousResearch",
                },
            ],
        }
        return self.available_models

    def extract_api_key(
        credentials: HTTPAuthorizationCredentials = Depends(
            HTTPBearer(auto_error=False)
        ),
    ):
        api_key = None
        if credentials:
            api_key = credentials.credentials
        else:
            api_key = os.getenv("HF_TOKEN")

        if api_key:
            if api_key.startswith("hf_"):
                return api_key
            else:
                logger.warn("Invalid HF Token!")
        else:
            logger.warn("No HF Token provided!")
        return None

    class ChatCompletionsPostItem(BaseModel):
        model: str = Field(
            default="mixtral-8x7b",
            description="(str) `mixtral-8x7b`",
        )
        messages: list = Field(
            default=[{"role": "user", "content": "Hello, who are you?"}],
            description="(list) Messages",
        )
        temperature: Union[float, None] = Field(
            default=0.5,
            description="(float) Temperature",
        )
        top_p: Union[float, None] = Field(
            default=0.95,
            description="(float) top p",
        )
        max_tokens: Union[int, None] = Field(
            default=-1,
            description="(int) Max tokens",
        )
        use_cache: bool = Field(
            default=False,
            description="(bool) Use cache",
        )
        stream: bool = Field(
            default=True,
            description="(bool) Stream",
        )

    def chat_completions(
        self, item: ChatCompletionsPostItem, api_key: str = Depends(extract_api_key)
    ):
        streamer = MessageStreamer(model=item.model)
        composer = MessageComposer(model=item.model)
        composer.merge(messages=item.messages)
        # streamer.chat = stream_chat_mock

        stream_response = streamer.chat_response(
            prompt=composer.merged_str,
            temperature=item.temperature,
            top_p=item.top_p,
            max_new_tokens=item.max_tokens,
            api_key=api_key,
            use_cache=item.use_cache,
        )
        if item.stream:
            event_source_response = EventSourceResponse(
                streamer.chat_return_generator(stream_response),
                media_type="text/event-stream",
                ping=2000,
                ping_message_factory=lambda: ServerSentEvent(**{"comment": ""}),
            )
            return event_source_response
        else:
            data_response = streamer.chat_return_dict(stream_response)
            return data_response

    def get_readme(self):
        readme_path = Path(__file__).parents[1] / "README.md"
        with open(readme_path, "r", encoding="utf-8") as rf:
            readme_str = rf.read()
        readme_html = markdown2.markdown(
            readme_str, extras=["table", "fenced-code-blocks", "highlightjs-lang"]
        )
        return readme_html

    def setup_routes(self):
        for prefix in ["", "/v1", "/api", "/api/v1"]:
            if prefix in ["/api/v1"]:
                include_in_schema = True
            else:
                include_in_schema = False

            self.app.get(
                prefix + "/models",
                summary="Get available models",
                include_in_schema=include_in_schema,
            )(self.get_available_models)

            self.app.post(
                prefix + "/chat/completions",
                summary="Chat completions in conversation session",
                include_in_schema=include_in_schema,
            )(self.chat_completions)
        self.app.get(
            "/readme",
            summary="README of HF LLM API",
            response_class=HTMLResponse,
            include_in_schema=False,
        )(self.get_readme)


class ArgParser(argparse.ArgumentParser):
    def __init__(self, *args, **kwargs):
        super(ArgParser, self).__init__(*args, **kwargs)

        self.add_argument(
            "-s",
            "--server",
            type=str,
            default="0.0.0.0",
            help="Server IP for HF LLM Chat API",
        )
        self.add_argument(
            "-p",
            "--port",
            type=int,
            default=23333,
            help="Server Port for HF LLM Chat API",
        )

        self.add_argument(
            "-d",
            "--dev",
            default=False,
            action="store_true",
            help="Run in dev mode",
        )

        self.args = self.parse_args(sys.argv[1:])


app = ChatAPIApp().app

if __name__ == "__main__":
    args = ArgParser().args
    if args.dev:
        uvicorn.run("__main__:app", host=args.server, port=args.port, reload=True)
    else:
        uvicorn.run("__main__:app", host=args.server, port=args.port, reload=False)

# python -m apis.chat_api  # [Docker] on product mode
# python -m apis.chat_api -d  # [Dev] on develop mode
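For reference, the `/models` route that `setup_routes` registers under several prefixes can be probed with a short client sketch (assuming the server is running locally on the default port):

```py
import httpx

# setup_routes() registers GET /models under "", "/v1", "/api" and "/api/v1",
# so any of these prefixes should return the same model list.
for prefix in ["", "/v1", "/api", "/api/v1"]:
    resp = httpx.get(f"http://127.0.0.1:23333{prefix}/models", timeout=10)
    ids = [m["id"] for m in resp.json()["data"]]
    print(prefix or "(root)", ids)
```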
examples/__init__.py
ADDED
File without changes
examples/chat_with_openai.py
ADDED
@@ -0,0 +1,25 @@
from openai import OpenAI

# If running this service behind a proxy, you might need to unset `http(s)_proxy`.
base_url = "http://127.0.0.1:23333"
api_key = "sk-xxxxx"

client = OpenAI(base_url=base_url, api_key=api_key)
response = client.chat.completions.create(
    model="mixtral-8x7b",
    messages=[
        {
            "role": "user",
            "content": "what is your model",
        }
    ],
    stream=True,
)

for chunk in response:
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="", flush=True)
    elif chunk.choices[0].finish_reason == "stop":
        print()
    else:
        pass
examples/chat_with_post.py
ADDED
@@ -0,0 +1,55 @@
import ast
import httpx
import json
import re

# If running this service behind a proxy, you might need to unset `http(s)_proxy`.
chat_api = "http://127.0.0.1:23333"
api_key = "sk-xxxxx"
requests_headers = {}
requests_payload = {
    "model": "mixtral-8x7b",
    "messages": [
        {
            "role": "user",
            "content": "what is your model",
        }
    ],
    "stream": True,
}

with httpx.stream(
    "POST",
    chat_api + "/chat/completions",
    headers=requests_headers,
    json=requests_payload,
    timeout=httpx.Timeout(connect=20, read=60, write=20, pool=None),
) as response:
    # https://docs.aiohttp.org/en/stable/streams.html
    # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb
    response_content = ""
    for line in response.iter_lines():
        remove_patterns = [r"^\s*data:\s*", r"^\s*\[DONE\]\s*"]
        for pattern in remove_patterns:
            line = re.sub(pattern, "", line).strip()

        if line:
            try:
                line_data = json.loads(line)
            except Exception as e:
                try:
                    line_data = ast.literal_eval(line)
                except:
                    print(f"Error: {line}")
                    raise e
            # print(f"line: {line_data}")
            delta_data = line_data["choices"][0]["delta"]
            finish_reason = line_data["choices"][0]["finish_reason"]
            if "role" in delta_data:
                role = delta_data["role"]
            if "content" in delta_data:
                delta_content = delta_data["content"]
                response_content += delta_content
                print(delta_content, end="", flush=True)
            if finish_reason == "stop":
                print()
messagers/__init__.py
ADDED
File without changes
messagers/message_composer.py
ADDED
@@ -0,0 +1,247 @@
import re
from pprint import pprint
from utils.logger import logger


class MessageComposer:
    # LINK - apis/chat_api.py#available-models
    AVALAIBLE_MODELS = [
        "mixtral-8x7b",
        "mistral-7b",
        "openchat-3.5",
        "nous-mixtral-8x7b",
    ]

    def __init__(self, model: str = None):
        if model in self.AVALAIBLE_MODELS:
            self.model = model
        else:
            self.model = "mixtral-8x7b"
        self.system_roles = ["system"]
        self.inst_roles = ["user", "system", "inst"]
        self.answer_roles = ["assistant", "bot", "answer"]
        self.default_role = "user"

    def concat_messages_by_role(self, messages):
        def is_same_role(role1, role2):
            if (
                (role1 == role2)
                or (role1 in self.inst_roles and role2 in self.inst_roles)
                or (role1 in self.answer_roles and role2 in self.answer_roles)
            ):
                return True
            else:
                return False

        concat_messages = []
        for message in messages:
            role = message["role"]
            content = message["content"]
            if concat_messages and is_same_role(role, concat_messages[-1]["role"]):
                concat_messages[-1]["content"] += "\n" + content
            else:
                if role in self.inst_roles:
                    message["role"] = "inst"
                elif role in self.answer_roles:
                    message["role"] = "answer"
                else:
                    message["role"] = "inst"
                concat_messages.append(message)
        return concat_messages

    def merge(self, messages) -> str:
        # Mistral and Mixtral:
        #   <s> [INST] Instruction [/INST] Model answer </s> [INST] Follow-up instruction [/INST]

        # OpenChat:
        #   GPT4 Correct User: Hello<|end_of_turn|>GPT4 Correct Assistant: Hi<|end_of_turn|>GPT4 Correct User: How are you today?<|end_of_turn|>GPT4 Correct Assistant:

        # Nous Mixtral:
        #   <|im_start|>system
        #   You are "Hermes 2".<|im_end|>
        #   <|im_start|>user
        #   Hello, who are you?<|im_end|>
        #   <|im_start|>assistant

        self.messages = messages
        self.merged_str = ""

        # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1#instruction-format
        if self.model in ["mixtral-8x7b", "mistral-7b"]:
            self.messages = self.concat_messages_by_role(messages)
            self.cached_str = ""
            for message in self.messages:
                role = message["role"]
                content = message["content"]
                if role in self.inst_roles:
                    self.cached_str = f"[INST] {content} [/INST]"
                elif role in self.answer_roles:
                    self.merged_str += f"<s> {self.cached_str} {content} </s>\n"
                    self.cached_str = ""
                else:
                    self.cached_str = f"[INST] {content} [/INST]"
            if self.cached_str:
                self.merged_str += f"{self.cached_str}"
        # https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO#prompt-format
        elif self.model in ["nous-mixtral-8x7b"]:
            self.merged_str_list = []
            for message in self.messages:
                role = message["role"]
                content = message["content"]
                if role not in ["system", "user", "assistant"]:
                    role = self.default_role
                message_line = f"<|im_start|>{role}\n{content}<|im_end|>"
                self.merged_str_list.append(message_line)
            self.merged_str_list.append("<|im_start|>assistant")
            self.merged_str = "\n".join(self.merged_str_list)
        # https://huggingface.co/openchat/openchat-3.5-0106
        elif self.model in ["openchat-3.5"]:
            self.messages = self.concat_messages_by_role(messages)
            self.merged_str_list = []
            self.end_of_turn = "<|end_of_turn|>"
            for message in self.messages:
                role = message["role"]
                content = message["content"]
                if role in self.inst_roles:
                    self.merged_str_list.append(
                        f"GPT4 Correct User:\n{content}{self.end_of_turn}"
                    )
                elif role in self.answer_roles:
                    self.merged_str_list.append(
                        f"GPT4 Correct Assistant:\n{content}{self.end_of_turn}"
                    )
                else:
                    self.merged_str_list.append(
                        f"GPT4 Correct User: {content}{self.end_of_turn}"
                    )
            self.merged_str_list.append(f"GPT4 Correct Assistant:\n")
            self.merged_str = "\n".join(self.merged_str_list)
        else:
            self.merged_str = "\n".join(
                [
                    f'`{message["role"]}`:\n{message["content"]}\n'
                    for message in self.messages
                ]
            )

        return self.merged_str

    def convert_pair_matches_to_messages(self, pair_matches_list):
        messages = []
        if len(pair_matches_list) <= 0:
            messages = [
                {
                    "role": "user",
                    "content": self.merged_str,
                }
            ]
        else:
            for match in pair_matches_list:
                inst = match.group("inst")
                answer = match.group("answer")
                messages.extend(
                    [
                        {"role": "user", "content": inst.strip()},
                        {"role": "assistant", "content": answer.strip()},
                    ]
                )
        return messages

    def append_last_instruction_to_messages(self, inst_matches_list, pair_matches_list):
        if len(inst_matches_list) > len(pair_matches_list):
            self.messages.extend(
                [
                    {
                        "role": "user",
                        "content": inst_matches_list[-1].group("inst").strip(),
                    }
                ]
            )

    def split(self, merged_str) -> list:
        self.merged_str = merged_str
        self.messages = []

        if self.model in ["mixtral-8x7b", "mistral-7b"]:
            pair_pattern = (
                r"<s>\s*\[INST\](?P<inst>[\s\S]*?)\[/INST\](?P<answer>[\s\S]*?)</s>"
            )
            pair_matches = re.finditer(pair_pattern, self.merged_str, re.MULTILINE)
            pair_matches_list = list(pair_matches)

            self.messages = self.convert_pair_matches_to_messages(pair_matches_list)

            inst_pattern = r"\[INST\](?P<inst>[\s\S]*?)\[/INST\]"
            inst_matches = re.finditer(inst_pattern, self.merged_str, re.MULTILINE)
            inst_matches_list = list(inst_matches)

            self.append_last_instruction_to_messages(
                inst_matches_list, pair_matches_list
            )
        elif self.model in ["nous-mixtral-8x7b"]:
            # https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO#prompt-format
            message_pattern = r"<\|im_start\|>(?P<role>system|user|assistant)[\s\n]*(?P<content>[\s\S]*?)<\|im_end\|>"
            message_matches = re.finditer(
                message_pattern, self.merged_str, flags=re.MULTILINE | re.IGNORECASE
            )
            message_matches_list = list(message_matches)
            logger.note(f"message_matches_list: {message_matches_list}")
            for match in message_matches_list:
                role = match.group("role")
                content = match.group("content")
                self.messages.append({"role": role, "content": content.strip()})
        elif self.model in ["openchat-3.5"]:
            pair_pattern = r"GPT4 Correct User:(?P<inst>[\s\S]*?)<\|end_of_turn\|>\s*GPT4 Correct Assistant:(?P<answer>[\s\S]*?)<\|end_of_turn\|>"
            pair_matches = re.finditer(
                pair_pattern, self.merged_str, flags=re.MULTILINE | re.IGNORECASE
            )
            pair_matches_list = list(pair_matches)
            self.messages = self.convert_pair_matches_to_messages(pair_matches_list)
            inst_pattern = r"GPT4 Correct User:(?P<inst>[\s\S]*?)<\|end_of_turn\|>"
            inst_matches = re.finditer(
                inst_pattern, self.merged_str, flags=re.MULTILINE | re.IGNORECASE
            )
            inst_matches_list = list(inst_matches)
            self.append_last_instruction_to_messages(
                inst_matches_list, pair_matches_list
            )
        else:
            self.messages = [
                {
                    "role": "user",
                    "content": self.merged_str,
                }
            ]

        return self.messages


if __name__ == "__main__":
    model = "mixtral-8x7b"
    # model = "nous-mixtral-8x7b"
    composer = MessageComposer(model)
    messages = [
        {
            "role": "system",
            "content": "You are a LLM developed by OpenAI.\nYour name is GPT-4.",
        },
        {"role": "user", "content": "Hello, who are you?"},
        {"role": "assistant", "content": "I am a bot."},
        {"role": "user", "content": "What is your name?"},
        # {"role": "assistant", "content": "My name is Bing."},
        # {"role": "user", "content": "Tell me a joke."},
        # {"role": "assistant", "content": "What is a robot's favorite type of music?"},
        # {
        #     "role": "user",
        #     "content": "How many questions have I asked? Please list them.",
        # },
    ]
    logger.note(f"model: {composer.model}")
    merged_str = composer.merge(messages)
    logger.note("merged_str:")
    logger.mesg(merged_str)
    logger.note("splitted messages:")
    pprint(composer.split(merged_str))
    # logger.note("merged merged_str:")
    # logger.mesg(composer.merge(composer.split(merged_str)))
messagers/message_outputer.py
ADDED
@@ -0,0 +1,65 @@
import json


class OpenaiStreamOutputer:
    """
    Create chat completion - OpenAI API Documentation
    * https://platform.openai.com/docs/api-reference/chat/create
    """

    def __init__(self):
        self.default_data = {
            "created": 1700000000,
            "id": "chatcmpl-hugginface",
            "object": "chat.completion.chunk",
            # "content_type": "Completions",
            "model": "hugginface",
            "choices": [],
        }

    def data_to_string(self, data={}, content_type=""):
        data_str = f"{json.dumps(data)}"
        return data_str

    def output(self, content=None, content_type="Completions") -> str:
        data = self.default_data.copy()
        if content_type == "Role":
            data["choices"] = [
                {
                    "index": 0,
                    "delta": {"role": "assistant"},
                    "finish_reason": None,
                }
            ]
        elif content_type in [
            "Completions",
            "InternalSearchQuery",
            "InternalSearchResult",
            "SuggestedResponses",
        ]:
            if content_type in ["InternalSearchQuery", "InternalSearchResult"]:
                content += "\n"
            data["choices"] = [
                {
                    "index": 0,
                    "delta": {"content": content},
                    "finish_reason": None,
                }
            ]
        elif content_type == "Finished":
            data["choices"] = [
                {
                    "index": 0,
                    "delta": {},
                    "finish_reason": "stop",
                }
            ]
        else:
            data["choices"] = [
                {
                    "index": 0,
                    "delta": {},
                    "finish_reason": None,
                }
            ]
        return self.data_to_string(data, content_type)
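A small sketch of how these chunks look on the wire, calling the class above directly (the example contents are illustrative):

```py
from messagers.message_outputer import OpenaiStreamOutputer

outputer = OpenaiStreamOutputer()
# Each call returns one JSON string shaped like an OpenAI chat.completion.chunk.
print(outputer.output(content=None, content_type="Role"))            # delta: {"role": "assistant"}
print(outputer.output(content="Hello", content_type="Completions"))  # delta: {"content": "Hello"}
print(outputer.output(content="", content_type="Finished"))          # finish_reason: "stop"
```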
mocks/__init__.py
ADDED
File without changes
mocks/stream_chat_mocker.py
ADDED
@@ -0,0 +1,13 @@
import time
from utils.logger import logger


def stream_chat_mock(*args, **kwargs):
    logger.note(msg=str(args) + str(kwargs))
    for i in range(10):
        content = f"W{i+1} "
        time.sleep(0.1)
        logger.mesg(content, end="")
        yield content
    logger.mesg("")
    yield ""
networks/__init__.py
ADDED
File without changes
networks/message_streamer.py
ADDED
@@ -0,0 +1,212 @@
import json
import re
import requests
from tiktoken import get_encoding as tiktoken_get_encoding
from messagers.message_outputer import OpenaiStreamOutputer
from utils.logger import logger
from utils.enver import enver


class MessageStreamer:
    MODEL_MAP = {
        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",  # 72.62, fast [Recommended]
        "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.2",  # 65.71, fast
        "nous-mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
        # "openchat-3.5": "openchat/openchat-3.5-1210",  # 68.89, fast
        # "zephyr-7b-beta": "HuggingFaceH4/zephyr-7b-beta",  # ❌ Too Slow
        # "llama-70b": "meta-llama/Llama-2-70b-chat-hf",  # ❌ Require Pro User
        # "codellama-34b": "codellama/CodeLlama-34b-Instruct-hf",  # ❌ Low Score
        # "falcon-180b": "tiiuae/falcon-180B-chat",  # ❌ Require Pro User
        "default": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    }
    STOP_SEQUENCES_MAP = {
        "mixtral-8x7b": "</s>",
        "mistral-7b": "</s>",
        "nous-mixtral-8x7b": "<|im_end|>",
        "openchat-3.5": "<|end_of_turn|>",
    }
    TOKEN_LIMIT_MAP = {
        "mixtral-8x7b": 32768,
        "mistral-7b": 32768,
        "nous-mixtral-8x7b": 32768,
        "openchat-3.5": 8192,
    }
    TOKEN_RESERVED = 100

    def __init__(self, model: str):
        if model in self.MODEL_MAP.keys():
            self.model = model
        else:
            self.model = "default"
        self.model_fullname = self.MODEL_MAP[self.model]
        self.message_outputer = OpenaiStreamOutputer()
        self.tokenizer = tiktoken_get_encoding("cl100k_base")

    def parse_line(self, line):
        line = line.decode("utf-8")
        line = re.sub(r"data:\s*", "", line)
        data = json.loads(line)
        try:
            content = data["token"]["text"]
        except:
            logger.err(data)
        return content

    def count_tokens(self, text):
        tokens = self.tokenizer.encode(text)
        token_count = len(tokens)
        logger.note(f"Prompt Token Count: {token_count}")
        return token_count

    def chat_response(
        self,
        prompt: str = None,
        temperature: float = 0.5,
        top_p: float = 0.95,
        max_new_tokens: int = None,
        api_key: str = None,
        use_cache: bool = False,
    ):
        # https://huggingface.co/docs/api-inference/detailed_parameters?code=curl
        # curl --proxy http://<server>:<port> https://api-inference.huggingface.co/models/<org>/<model_name> -X POST -d '{"inputs":"who are you?","parameters":{"max_new_token":64}}' -H 'Content-Type: application/json' -H 'Authorization: Bearer <HF_TOKEN>'
        self.request_url = (
            f"https://api-inference.huggingface.co/models/{self.model_fullname}"
        )
        self.request_headers = {
            "Content-Type": "application/json",
        }

        if api_key:
            logger.note(
                f"Using API Key: {api_key[:3]}{(len(api_key)-7)*'*'}{api_key[-4:]}"
            )
            self.request_headers["Authorization"] = f"Bearer {api_key}"

        if temperature is None or temperature < 0:
            temperature = 0.0
        # temperature must be between 0 and 1 for HF LLM models
        temperature = max(temperature, 0.01)
        temperature = min(temperature, 0.99)
        top_p = max(top_p, 0.01)
        top_p = min(top_p, 0.99)

        token_limit = int(
            self.TOKEN_LIMIT_MAP[self.model]
            - self.TOKEN_RESERVED
            - self.count_tokens(prompt) * 1.35
        )
        if token_limit <= 0:
            raise ValueError("Prompt exceeded token limit!")

        if max_new_tokens is None or max_new_tokens <= 0:
            max_new_tokens = token_limit
        else:
            max_new_tokens = min(max_new_tokens, token_limit)

        # References:
        #   huggingface_hub/inference/_client.py:
        #     class InferenceClient > def text_generation()
        #   huggingface_hub/inference/_text_generation.py:
        #     class TextGenerationRequest > param `stream`
        # https://huggingface.co/docs/text-generation-inference/conceptual/streaming#streaming-with-curl
        # https://huggingface.co/docs/api-inference/detailed_parameters#text-generation-task
        self.request_body = {
            "inputs": prompt,
            "parameters": {
                "temperature": temperature,
                "top_p": top_p,
                "max_new_tokens": max_new_tokens,
                "return_full_text": False,
            },
            "options": {
                "use_cache": use_cache,
            },
            "stream": True,
        }

        if self.model in self.STOP_SEQUENCES_MAP.keys():
            self.stop_sequences = self.STOP_SEQUENCES_MAP[self.model]
            # self.request_body["parameters"]["stop_sequences"] = [
            #     self.STOP_SEQUENCES[self.model]
            # ]

        logger.back(self.request_url)
        enver.set_envs(proxies=True)
        stream_response = requests.post(
            self.request_url,
            headers=self.request_headers,
            json=self.request_body,
            proxies=enver.requests_proxies,
            stream=True,
        )
        status_code = stream_response.status_code
        if status_code == 200:
            logger.success(status_code)
        else:
            logger.err(status_code)

        return stream_response

    def chat_return_dict(self, stream_response):
        # https://platform.openai.com/docs/guides/text-generation/chat-completions-response-format
        final_output = self.message_outputer.default_data.copy()
        final_output["choices"] = [
            {
                "index": 0,
                "finish_reason": "stop",
                "message": {
                    "role": "assistant",
                    "content": "",
                },
            }
        ]
        logger.back(final_output)

        final_content = ""
        for line in stream_response.iter_lines():
            if not line:
                continue
            content = self.parse_line(line)

            if content.strip() == self.stop_sequences:
                logger.success("\n[Finished]")
                break
            else:
                logger.back(content, end="")
                final_content += content

        if self.model in self.STOP_SEQUENCES_MAP.keys():
            final_content = final_content.replace(self.stop_sequences, "")

        final_content = final_content.strip()
        final_output["choices"][0]["message"]["content"] = final_content
        return final_output

    def chat_return_generator(self, stream_response):
        is_finished = False
        line_count = 0
        for line in stream_response.iter_lines():
            if line:
                line_count += 1
            else:
                continue

            content = self.parse_line(line)

            if content.strip() == self.stop_sequences:
                content_type = "Finished"
                logger.success("\n[Finished]")
                is_finished = True
            else:
                content_type = "Completions"
                if line_count == 1:
                    content = content.lstrip()
                logger.back(content, end="")

            output = self.message_outputer.output(
                content=content, content_type=content_type
            )
            yield output

        if not is_finished:
            yield self.message_outputer.output(content="", content_type="Finished")
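A minimal direct-usage sketch of `MessageStreamer` outside FastAPI, assuming a valid `HF_TOKEN` is set in the environment (the prompt and parameter values are illustrative):

```py
import os
from networks.message_streamer import MessageStreamer
from messagers.message_composer import MessageComposer

# Build the model-specific prompt string, as chat_completions() does.
composer = MessageComposer(model="mixtral-8x7b")
prompt = composer.merge([{"role": "user", "content": "Hello, who are you?"}])

streamer = MessageStreamer(model="mixtral-8x7b")
stream_response = streamer.chat_response(
    prompt=prompt,
    temperature=0.5,
    top_p=0.95,
    max_new_tokens=256,
    api_key=os.getenv("HF_TOKEN"),  # assumed to be set; see the README
)
# Collect the whole answer as a single OpenAI-style dict.
result = streamer.chat_return_dict(stream_response)
print(result["choices"][0]["message"]["content"])
```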
requirements.txt
ADDED
@@ -0,0 +1,12 @@
aiohttp
fastapi
httpx
markdown2[all]
openai
pydantic
requests
sse_starlette
termcolor
tiktoken
uvicorn
websockets
utils/__init__.py
ADDED
@@ -0,0 +1,69 @@
import json
import requests
import os

from pathlib import Path


class OSEnver:
    def __init__(self):
        self.envs_stack = []
        self.envs = os.environ.copy()

    def store_envs(self):
        self.envs_stack.append(self.envs)

    def restore_envs(self):
        self.envs = self.envs_stack.pop()
        if self.global_scope:
            os.environ = self.envs

    def set_envs(self, secrets=True, proxies=None, store_envs=True):
        # caller_info = inspect.stack()[1]
        # logger.back(f"OS Envs is set by: {caller_info.filename}")

        if store_envs:
            self.store_envs()

        if secrets:
            secrets_path = Path(__file__).parents[1] / "secrets.json"
            if secrets_path.exists():
                with open(secrets_path, "r") as rf:
                    secrets = json.load(rf)
            else:
                secrets = {}

        if proxies:
            for proxy_env in ["http_proxy", "https_proxy"]:
                if isinstance(proxies, str):
                    self.envs[proxy_env] = proxies
                elif "http_proxy" in secrets.keys():
                    self.envs[proxy_env] = secrets["http_proxy"]
                elif os.getenv("http_proxy"):
                    self.envs[proxy_env] = os.getenv("http_proxy")
                else:
                    continue

        self.proxy = (
            self.envs.get("all_proxy")
            or self.envs.get("http_proxy")
            or self.envs.get("https_proxy")
            or None
        )
        self.requests_proxies = {
            "http": self.proxy,
            "https": self.proxy,
        }

        # https://www.proxynova.com/proxy-server-list/country-us/

        print(f"Using proxy: [{self.proxy}]")
        # r = requests.get(
        #     "http://ifconfig.me/ip",
        #     proxies=self.requests_proxies,
        #     timeout=10,
        # )
        # print(f"[r.status_code] r.text")


enver = OSEnver()
utils/enver.py
ADDED
@@ -0,0 +1,60 @@
import json
import os

from pathlib import Path
from utils.logger import logger


class OSEnver:
    def __init__(self):
        self.envs_stack = []
        self.envs = os.environ.copy()

    def store_envs(self):
        self.envs_stack.append(self.envs)

    def restore_envs(self):
        self.envs = self.envs_stack.pop()

    def set_envs(self, secrets=True, proxies=None, store_envs=True):
        # caller_info = inspect.stack()[1]
        # logger.back(f"OS Envs is set by: {caller_info.filename}")

        if store_envs:
            self.store_envs()

        if secrets:
            secrets_path = Path(__file__).parents[1] / "secrets.json"
            if secrets_path.exists():
                with open(secrets_path, "r") as rf:
                    secrets = json.load(rf)
            else:
                secrets = {}

        if proxies:
            for proxy_env in ["http_proxy", "https_proxy"]:
                if isinstance(proxies, str):
                    self.envs[proxy_env] = proxies
                elif "http_proxy" in secrets.keys():
                    self.envs[proxy_env] = secrets["http_proxy"]
                elif os.getenv("http_proxy"):
                    self.envs[proxy_env] = os.getenv("http_proxy")
                else:
                    continue

        self.proxy = (
            self.envs.get("all_proxy")
            or self.envs.get("http_proxy")
            or self.envs.get("https_proxy")
            or None
        )
        self.requests_proxies = {
            "http": self.proxy,
            "https": self.proxy,
        }

        if self.proxy:
            logger.note(f"Using proxy: [{self.proxy}]")


enver = OSEnver()
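`enver` is consumed in `networks/message_streamer.py`; a condensed sketch of that pattern (the target URL is illustrative):

```py
import requests
from utils.enver import enver

# Resolve proxies from secrets.json / http(s)_proxy env vars, as set_envs() does,
# then reuse them for outbound requests (mirrors MessageStreamer.chat_response).
enver.set_envs(proxies=True)
resp = requests.get("https://huggingface.co", proxies=enver.requests_proxies, timeout=10)
print(resp.status_code)
```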
utils/logger.py
ADDED
@@ -0,0 +1,269 @@
import datetime
import functools
import inspect
import logging
import os
import shutil
import subprocess
from termcolor import colored


def add_fillers(text, filler="=", fill_side="both"):
    terminal_width = shutil.get_terminal_size().columns
    text = text.strip()
    text_width = len(text)
    if text_width >= terminal_width:
        return text

    if fill_side[0].lower() == "b":
        leading_fill_str = filler * ((terminal_width - text_width) // 2 - 1) + " "
        trailing_fill_str = " " + filler * (
            terminal_width - text_width - len(leading_fill_str) - 1
        )
    elif fill_side[0].lower() == "l":
        leading_fill_str = filler * (terminal_width - text_width - 1) + " "
        trailing_fill_str = ""
    elif fill_side[0].lower() == "r":
        leading_fill_str = ""
        trailing_fill_str = " " + filler * (terminal_width - text_width - 1)
    else:
        raise ValueError("Invalid fill_side")

    filled_str = f"{leading_fill_str}{text}{trailing_fill_str}"
    return filled_str


class OSLogger(logging.Logger):
    LOG_METHODS = {
        "err": ("error", "red"),
        "warn": ("warning", "light_red"),
        "note": ("info", "light_magenta"),
        "mesg": ("info", "light_cyan"),
        "file": ("info", "light_blue"),
        "line": ("info", "white"),
        "success": ("info", "light_green"),
        "fail": ("info", "light_red"),
        "back": ("debug", "light_cyan"),
    }
    INDENT_METHODS = [
        "indent",
        "set_indent",
        "reset_indent",
        "store_indent",
        "restore_indent",
        "log_indent",
    ]
    LEVEL_METHODS = [
        "set_level",
        "store_level",
        "restore_level",
        "quiet",
        "enter_quiet",
        "exit_quiet",
    ]
    LEVEL_NAMES = {
        "critical": logging.CRITICAL,
        "error": logging.ERROR,
        "warning": logging.WARNING,
        "info": logging.INFO,
        "debug": logging.DEBUG,
    }

    def __init__(self, name=None, prefix=False):
        if not name:
            frame = inspect.stack()[1]
            module = inspect.getmodule(frame[0])
            name = module.__name__

        super().__init__(name)
        self.setLevel(logging.INFO)

        if prefix:
            formatter_prefix = "[%(asctime)s] - [%(name)s] - [%(levelname)s]\n"
        else:
            formatter_prefix = ""

        self.formatter = logging.Formatter(formatter_prefix + "%(message)s")

        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.INFO)
        stream_handler.setFormatter(self.formatter)
        self.addHandler(stream_handler)

        self.log_indent = 0
        self.log_indents = []

        self.log_level = "info"
        self.log_levels = []

    def indent(self, indent=2):
        self.log_indent += indent

    def set_indent(self, indent=2):
        self.log_indent = indent

    def reset_indent(self):
        self.log_indent = 0

    def store_indent(self):
        self.log_indents.append(self.log_indent)

    def restore_indent(self):
        self.log_indent = self.log_indents.pop(-1)

    def set_level(self, level):
        self.log_level = level
        self.setLevel(self.LEVEL_NAMES[level])

    def store_level(self):
        self.log_levels.append(self.log_level)

    def restore_level(self):
        self.log_level = self.log_levels.pop(-1)
        self.set_level(self.log_level)

    def quiet(self):
        self.set_level("critical")

    def enter_quiet(self, quiet=False):
        if quiet:
            self.store_level()
            self.quiet()

    def exit_quiet(self, quiet=False):
        if quiet:
            self.restore_level()

    def log(
        self,
        level,
        color,
        msg,
        indent=0,
        fill=False,
        fill_side="both",
        end="\n",
        *args,
        **kwargs,
    ):
        if type(msg) == str:
            msg_str = msg
        else:
            msg_str = repr(msg)
            quotes = ["'", '"']
            if msg_str[0] in quotes and msg_str[-1] in quotes:
                msg_str = msg_str[1:-1]

        indent_str = " " * (self.log_indent + indent)
        indented_msg = "\n".join([indent_str + line for line in msg_str.split("\n")])

        if fill:
            indented_msg = add_fillers(indented_msg, fill_side=fill_side)

        handler = self.handlers[0]
        handler.terminator = end

        getattr(self, level)(colored(indented_msg, color), *args, **kwargs)

    def route_log(self, method, msg, *args, **kwargs):
        level, method = method
        functools.partial(self.log, level, method, msg)(*args, **kwargs)

    def err(self, msg: str = "", *args, **kwargs):
        self.route_log(("error", "red"), msg, *args, **kwargs)

    def warn(self, msg: str = "", *args, **kwargs):
        self.route_log(("warning", "light_red"), msg, *args, **kwargs)

    def note(self, msg: str = "", *args, **kwargs):
        self.route_log(("info", "light_magenta"), msg, *args, **kwargs)

    def mesg(self, msg: str = "", *args, **kwargs):
        self.route_log(("info", "light_cyan"), msg, *args, **kwargs)

    def file(self, msg: str = "", *args, **kwargs):
        self.route_log(("info", "light_blue"), msg, *args, **kwargs)

    def line(self, msg: str = "", *args, **kwargs):
        self.route_log(("info", "white"), msg, *args, **kwargs)

    def success(self, msg: str = "", *args, **kwargs):
        self.route_log(("info", "light_green"), msg, *args, **kwargs)

    def fail(self, msg: str = "", *args, **kwargs):
        self.route_log(("info", "light_red"), msg, *args, **kwargs)

    def back(self, msg: str = "", *args, **kwargs):
        self.route_log(("debug", "light_cyan"), msg, *args, **kwargs)


logger = OSLogger()


def shell_cmd(cmd, getoutput=False, showcmd=True, env=None):
    if showcmd:
        logger.info(colored(f"\n$ [{os.getcwd()}]", "light_blue"))
        logger.info(colored(f"  $ {cmd}\n", "light_cyan"))
    if getoutput:
        output = subprocess.getoutput(cmd, env=env)
        return output
    else:
        subprocess.run(cmd, shell=True, env=env)


class Runtimer:
    def __enter__(self):
        self.t1, _ = self.start_time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.t2, _ = self.end_time()
        self.elapsed_time(self.t2 - self.t1)

    def start_time(self):
        t1 = datetime.datetime.now()
        self.logger_time("start", t1)
        return t1, self.time2str(t1)

    def end_time(self):
        t2 = datetime.datetime.now()
        self.logger_time("end", t2)
        return t2, self.time2str(t2)

    def elapsed_time(self, dt=None):
        if dt is None:
            dt = self.t2 - self.t1
        self.logger_time("elapsed", dt)
        return dt, self.time2str(dt)

    def logger_time(self, time_type, t):
        time_types = {
            "start": "Start",
            "end": "End",
            "elapsed": "Elapsed",
        }
        time_str = add_fillers(
            colored(
                f"{time_types[time_type]} time: [ {self.time2str(t)} ]",
                "light_magenta",
            ),
            fill_side="both",
        )
        logger.line(time_str)

    # Convert time to string
    def time2str(self, t):
        datetime_str_format = "%Y-%m-%d %H:%M:%S"
        if isinstance(t, datetime.datetime):
            return t.strftime(datetime_str_format)
        elif isinstance(t, datetime.timedelta):
            hours = t.seconds // 3600
            hour_str = f"{hours} hr" if hours > 0 else ""
            minutes = (t.seconds // 60) % 60
            minute_str = f"{minutes:>2} min" if minutes > 0 else ""
            seconds = t.seconds % 60
            second_str = f"{seconds:>2} s"
            time_str = " ".join([hour_str, minute_str, second_str]).strip()
            return time_str
        else:
            return str(t)
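The colored helpers and `Runtimer` are used throughout the project; a short usage sketch:

```py
from utils.logger import logger, Runtimer

logger.note("Starting request ...")
with Runtimer():  # logs start, end and elapsed time around the block
    logger.back("streaming ", end="")
    logger.mesg("chunk")
logger.success("[Finished]")
```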