Spaces:
Build error
contenteaseAI committed on
Commit • add7ded
1 Parent(s): 807af7a
Update app.py
app.py CHANGED
@@ -1,6 +1,11 @@
 import gradio as gr
 from openai import OpenAI
 import os
+import logging
+import time
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
 
 css = '''
 .gradio-container{max-width: 1000px !important}
@@ -12,11 +17,19 @@ footer {
 
 ACCESS_TOKEN = os.getenv("HF_TOKEN")
 
+
+start_time = time.time()
+logger.info("Loading Client....")
+
 client = OpenAI(
     base_url="https://api-inference.huggingface.co/v1/",
     api_key=ACCESS_TOKEN,
 )
 
+end_time = time.time()
+logger.info(f"Client Loaded. Time taken : {end_time - start_time} seconds.")
+
+#interact with API
 def respond(
     message,
     history,
@@ -69,6 +82,7 @@ def respond(
     Please ensure that the output JSON is well-structured and includes only relevant details about the work to be done.
     """
     messages = [{"role": "system", "content": SYS_PROMPT}]
+
     if len(history) == 0:
         pass
     else:
@@ -84,6 +98,9 @@ def respond(
 
     response = ""
 
+    start_time = time.time()
+    logger.info("Generating Response....")
+
     for message in client.chat.completions.create(
         model="meta-llama/Meta-Llama-3.1-8B-Instruct",
         max_tokens=max_tokens,
@@ -96,6 +113,9 @@ def respond(
         response += token
         yield response
 
+    end_time = time.time()
+    logger.info(f"Response Generated. Time taken : {end_time - start_time} seconds.")
+
 
 DESCRIPTION = '''
 <div>
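For reference, the commit wraps both client construction and the streaming completion loop in the same pattern: record time.time() before the work, log a start message, do the work, then log the elapsed wall-clock time. Below is a minimal standalone sketch of that pattern, assuming HF_TOKEN is set and the model is reachable through the Inference API; the prompt and token budget are illustrative and not part of the commit.

import logging
import os
import time

from openai import OpenAI

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=os.getenv("HF_TOKEN"),  # assumes HF_TOKEN is exported in the environment
)

start_time = time.time()
logger.info("Generating Response....")

response = ""
# stream=True yields incremental chunks; delta.content may be None
# on the final chunk, so guard before concatenating.
for chunk in client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "Say hello."}],  # illustrative prompt
    max_tokens=64,
    stream=True,
):
    token = chunk.choices[0].delta.content
    if token:
        response += token

end_time = time.time()
logger.info(f"Response Generated. Time taken : {end_time - start_time} seconds.")

One caveat: time.time() measures wall-clock time, so the logged figure includes network latency and any queueing on the inference endpoint, not just generation time.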