Niansuh committed on
Commit
b50abc0
·
verified ·
1 Parent(s): 140b80f

Upload 3 files

Browse files
Files changed (3) hide show
  1. .gitattributes +2 -35
  2. main.py +254 -0
  3. requirements.txt +7 -0
.gitattributes CHANGED
@@ -1,35 +1,2 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ # Auto detect text files and perform LF normalization
2
+ * text=auto
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
main.py ADDED
@@ -0,0 +1,254 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import logging
3
+ import os
4
+ import uuid
5
+ from datetime import datetime
6
+ from typing import Any, Dict, List, Optional
7
+
8
+ import httpx
9
+ import uvicorn
10
+ from dotenv import load_dotenv
11
+ from fastapi import FastAPI, HTTPException, Depends
12
+ from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
13
+ from pydantic import BaseModel
14
+ from starlette.middleware.cors import CORSMiddleware
15
+ from starlette.responses import StreamingResponse, Response
16
+
17
# Configure service-wide logging before anything else emits a record.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Load a local .env (if present) before reading configuration from the
# environment, then create the ASGI application.
load_dotenv()
app = FastAPI()
BASE_URL = "https://aichatonlineorg.erweima.ai/aichatonline"
# Shared secret callers must present as a bearer token (defaults to "666").
APP_SECRET = os.getenv("APP_SECRET", "666")
# Upstream sider.ai access token; empty string if unset.
ACCESS_TOKEN = os.getenv("SD_ACCESS_TOKEN", "")

# Headers sent upstream; they imitate the ChitChat Edge extension, which the
# upstream endpoint appears to expect (origin + user-agent pair).
headers = {
    'accept': '*/*',
    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    'authorization': f'Bearer {ACCESS_TOKEN}',
    'cache-control': 'no-cache',
    'origin': 'chrome-extension://dhoenijjpgpeimemopealfcbiecgceod',
    'pragma': 'no-cache',
    'priority': 'u=1, i',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'none',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0',
}
40
+
41
# Every exposed model's id doubles as its display name, so the catalogue is
# derived from a single tuple of ids.
_MODEL_IDS = (
    "claude-3.5-sonnet",
    "claude-3-opus",
    "gemini-1.5-pro",
    "gpt-4o",
    "o1-preview",
    "o1-mini",
    "gpt-4o-mini",
)
ALLOWED_MODELS = [{"id": model_id, "name": model_id} for model_id in _MODEL_IDS]

# Configure CORS: wide open by design (public proxy); tighten origins here if
# the deployment ever needs to restrict callers.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Allow all sources, you can restrict specific sources if needed
    allow_credentials=True,
    allow_methods=["*"],  # All methods allowed
    allow_headers=["*"],  # Allow all headers
)
# Bearer-token extractor used by the verify_app_secret dependency.
security = HTTPBearer()
59
+
60
+
61
class Message(BaseModel):
    """A single turn of an OpenAI-style conversation."""

    # Speaker role, e.g. "user" or "assistant".
    role: str
    # Plain-text body of the message.
    content: str
64
+
65
+
66
class ChatRequest(BaseModel):
    """OpenAI-compatible chat completion request body."""

    # Must be one of the ids in ALLOWED_MODELS.
    model: str
    messages: List[Message]
    # When true the endpoint returns an SSE stream instead of one JSON body.
    stream: Optional[bool] = False
70
+
71
+
72
def simulate_data(content, model):
    """Build one OpenAI-style streaming chunk carrying *content* (not final)."""
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": None,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": int(datetime.now().timestamp()),
        "model": model,
        "choices": [choice],
        "usage": None,
    }
87
+
88
+
89
def stop_data(content, model):
    """Build the terminal OpenAI-style streaming chunk (finish_reason "stop")."""
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": "stop",
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": int(datetime.now().timestamp()),
        "model": model,
        "choices": [choice],
        "usage": None,
    }
104
+
105
+
106
def create_chat_completion_data(content: str, model: str, finish_reason: Optional[str] = None) -> Dict[str, Any]:
    """Return one chat.completion.chunk dict for an SSE frame.

    Args:
        content: Delta text for this frame ("" for the terminal frame).
        model: Model id to echo back to the client.
        finish_reason: None for intermediate frames, "stop" for the last one.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": int(datetime.now().timestamp()),
        "model": model,
        "choices": [choice],
        "usage": None,
    }
121
+
122
+
123
def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """FastAPI dependency: validate the bearer token against APP_SECRET.

    Returns the presented token on success; raises HTTPException(403) on
    mismatch.
    """
    import hmac  # local import: only needed for the constant-time comparison

    # hmac.compare_digest avoids leaking information about the secret via
    # response-timing differences, unlike a plain `!=` comparison.
    if not hmac.compare_digest(credentials.credentials, APP_SECRET):
        raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
    return credentials.credentials
127
+
128
+
129
@app.options("/hf/v1/chat/completions")
async def chat_completions_options():
    """Answer the CORS preflight for the chat completions endpoint."""
    cors_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "POST, OPTIONS",
        "Access-Control-Allow-Headers": "Content-Type, Authorization",
    }
    return Response(status_code=200, headers=cors_headers)
139
+
140
+
141
def replace_escaped_newlines(input_string: str) -> str:
    """Convert literal backslash-n escape sequences into real newlines."""
    return "\n".join(input_string.split("\\n"))
143
+
144
+
145
@app.get("/hf/v1/models")
async def list_models():
    """Expose the static model catalogue in OpenAI list format."""
    return {"object": "list", "data": ALLOWED_MODELS}
148
+
149
+
150
@app.post("/hf/v1/chat/completions")
async def chat_completions(
    request: ChatRequest, app_secret: str = Depends(verify_app_secret)
):
    """OpenAI-compatible chat completions endpoint backed by sider.ai.

    Validates the requested model, flattens the conversation into a single
    prompt, forwards it to the upstream completion API, and returns either an
    SSE stream of chat.completion.chunk frames (request.stream=True) or one
    aggregated chat.completion body.

    Raises:
        HTTPException(400): unknown model id.
        HTTPException(403): bad bearer token (via verify_app_secret).
        HTTPException(4xx/5xx): upstream HTTP or transport failure.
    """
    logger.info(f"Received chat completion request for model: {request.model}")

    if request.model not in [model['id'] for model in ALLOWED_MODELS]:
        raise HTTPException(
            status_code=400,
            detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(model['id'] for model in ALLOWED_MODELS)}",
        )

    # Flatten the message list into the single prompt format upstream expects.
    json_data = {
        'prompt': "\n".join(
            [
                f"{'User' if msg.role == 'user' else 'Assistant'}: {msg.content}"
                for msg in request.messages
            ]
        ),
        'stream': True,
        'app_name': 'ChitChat_Edge_Ext',
        'app_version': '4.26.1',
        'tz_name': 'Asia/Karachi',
        'cid': '',
        'model': request.model,
        'search': False,
        'auto_search': False,
        'filter_search_history': False,
        'from': 'chat',
        'group_id': 'default',
        'chat_models': [],
        'files': [],
        'prompt_template': {
            'key': '',
            'attributes': {
                'lang': 'original',
            },
        },
        'tools': {
            'auto': [
                'search',
                'text_to_image',
                'data_analysis',
            ],
        },
        'extra_info': {
            'origin_url': '',
            'origin_title': '',
        },
    }

    async def generate():
        # Proxy the upstream SSE stream, re-emitting each data frame as an
        # OpenAI-style chat.completion.chunk.
        async with httpx.AsyncClient() as client:
            try:
                async with client.stream(
                    'POST',
                    'https://sider.ai/api/v2/completion/text',
                    headers=headers,
                    json=json_data,
                    timeout=120.0,
                ) as response:
                    response.raise_for_status()
                    async for line in response.aiter_lines():
                        if not line or "[DONE]" in line:
                            continue
                        # Upstream frames look like "data: {...}". Skip
                        # anything else (comments, keep-alives) instead of
                        # crashing json.loads on a non-JSON slice.
                        if not line.startswith("data:"):
                            continue
                        try:
                            payload = json.loads(line[5:])["data"]
                        except (json.JSONDecodeError, KeyError):
                            logger.warning("Skipping malformed upstream line: %s", line)
                            continue
                        yield f"data: {json.dumps(create_chat_completion_data(payload.get('text', ''), request.model))}\n\n"
                    # Terminate the stream the way OpenAI clients expect.
                    yield f"data: {json.dumps(create_chat_completion_data('', request.model, 'stop'))}\n\n"
                    yield "data: [DONE]\n\n"
            except httpx.HTTPStatusError as e:
                logger.error(f"HTTP error occurred: {e}")
                raise HTTPException(status_code=e.response.status_code, detail=str(e))
            except httpx.RequestError as e:
                logger.error(f"An error occurred while requesting: {e}")
                raise HTTPException(status_code=500, detail=str(e))

    if request.stream:
        logger.info("Streaming response")
        return StreamingResponse(generate(), media_type="text/event-stream")
    else:
        logger.info("Non-streaming response")
        # Drain the same generator and concatenate every delta into one body.
        full_response = ""
        async for chunk in generate():
            if chunk.startswith("data: ") and not chunk[6:].startswith("[DONE]"):
                data = json.loads(chunk[6:])
                if data["choices"][0]["delta"].get("content"):
                    full_response += data["choices"][0]["delta"]["content"]

        return {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "object": "chat.completion",
            "created": int(datetime.now().timestamp()),
            "model": request.model,
            "choices": [
                {
                    "index": 0,
                    "message": {"role": "assistant", "content": full_response},
                    "finish_reason": "stop",
                }
            ],
            "usage": None,
        }
250
+
251
+
252
+
253
if __name__ == "__main__":
    # Bind on all interfaces; 7860 is the conventional Hugging Face Spaces port.
    uvicorn.run(app, host="0.0.0.0", port=7860)
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ fastapi
2
+ httpx
3
+ pydantic
4
+ pyinstaller
5
+ python-dotenv
6
+ starlette
7
+ uvicorn