Update app.py
app.py CHANGED
@@ -6,6 +6,7 @@ from fastapi.responses import StreamingResponse
 import uvicorn
 from dotenv import load_dotenv
 import os
+import re
 
 # Load environment variables from .env file
 load_dotenv()
@@ -27,7 +28,6 @@ class v1:
     ):
         """
         Initializes the v1 AI API with given parameters.
-
         Args:
             model (str, optional): The AI model to use for text generation. Defaults to "claude".
                 Options: "llama", "claude".
@@ -78,6 +78,7 @@ class v1:
                 f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
             )
 
+        buffer = ""
         for line in response.iter_lines(decode_unicode=True):
             if line:
                 if line.startswith("data: "):
@@ -90,14 +91,29 @@ class v1:
                             (self.model == "claude" and model == 'OPENROUTER_CLAUDE'):
                             content = data['chunk']['content']
                             if content:
-                                yield content
+                                buffer += content
+                                # Check if we have a complete line or paragraph
+                                lines = buffer.split('\n')
+                                if len(lines) > 1:
+                                    for complete_line in lines[:-1]:
+                                        yield self.format_text(complete_line) + '\n'
+                                    buffer = lines[-1]
                     except KeyError:
                         pass
                     except json.JSONDecodeError:
                         pass
 
+        # Yield any remaining content in the buffer
+        if buffer:
+            yield self.format_text(buffer)
+
         yield "[DONE]"
 
+    def format_text(self, text: str) -> str:
+        # Convert *text* to <i>text</i> for italic
+        text = re.sub(r'\*(.*?)\*', r'<i>\1</i>', text)
+        return text
+
     def chat(self, prompt: str) -> Generator[str, None, None]:
         """Stream responses as string chunks"""
         return self.ask(prompt)
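For reference, a minimal standalone sketch of the behavior this commit introduces. The stream here is simulated (the helper stream_formatted and the sample chunks are illustrative, not part of the Space); format_text and the buffering logic mirror the diff above:

import re

def format_text(text: str) -> str:
    # Same regex as the commit: convert *text* to <i>text</i> for italic
    return re.sub(r'\*(.*?)\*', r'<i>\1</i>', text)

def stream_formatted(chunks):
    # Mirrors the buffering added to ask(): accumulate incoming chunks,
    # emit only complete lines, and format each one before yielding.
    buffer = ""
    for content in chunks:
        buffer += content
        lines = buffer.split('\n')
        if len(lines) > 1:
            for complete_line in lines[:-1]:
                yield format_text(complete_line) + '\n'
            buffer = lines[-1]
    # Yield any remaining content in the buffer
    if buffer:
        yield format_text(buffer)
    yield "[DONE]"

# Chunk boundaries intentionally do not align with markup or line breaks.
chunks = ["Hello *wor", "ld*\nsecond ", "*line*"]
print(list(stream_formatted(chunks)))
# ['Hello <i>world</i>\n', 'second <i>line</i>', '[DONE]']

The buffering is what makes the formatting reliable: streamed chunks can split a *...* pair across messages, so running the regex only on complete lines keeps it from seeing half a marker.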