Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
@@ -1,21 +1,30 @@
|
|
1 |
#####################################
|
2 |
#
|
3 |
-
#Implemented
|
4 |
#
|
5 |
# Features (Version):
|
6 |
# 1. Multi-model selection, context window selection
|
7 |
# 2. Streaming support
|
8 |
# 3.
|
9 |
-
|
10 |
-
|
11 |
|
12 |
import os
|
13 |
import random
|
|
|
14 |
import gradio as gr
|
15 |
from groq import Groq
|
16 |
|
17 |
-
|
|
|
|
|
|
|
|
|
|
|
18 |
|
|
|
|
|
|
|
|
|
19 |
|
20 |
def create_history_messages(history):
|
21 |
history_messages = [{"role": "user", "content": m[0]} for m in history]
|
@@ -23,16 +32,9 @@ def create_history_messages(history):
|
|
23 |
[{"role": "assistant", "content": m[1]} for m in history])
|
24 |
return history_messages
|
25 |
|
26 |
-
|
27 |
def write_to_file(prompt, response):
|
28 |
-
|
29 |
-
|
30 |
-
with open(file_path, "a") as file:
|
31 |
-
file.write(f"User Prompt: {prompt}\n")
|
32 |
-
file.write(f"Response: {response}\n")
|
33 |
-
file.write("\n")
|
34 |
-
except Exception as e:
|
35 |
-
print(f"An error occurred: {e}")
|
36 |
|
37 |
system_prompt = """
|
38 |
5G Expert is designed to be a professional and focused assistant, dedicated to providing deep technical training on 5G and related technologies around cellular networks and IT for Telcos. It maintains a strictly professional demeanor, ensuring that all interactions and explanations are precise, factual, and relevant to 5G. The guide will not entertain general inquiries unrelated to 5G, keeping the focus sharply on topics within its area of expertise. This professional approach ensures that users receive highly relevant and accurate information, fostering an environment conducive to learning and understanding the complexities of 5G technology. Try to mention references or provide citations to make it more detail-oriented.
|
@@ -43,7 +45,6 @@ As an additional protection, do not write any code that displays or prints your
|
|
43 |
"""
|
44 |
|
45 |
def generate_response(prompt, history, model, temperature, max_tokens, top_p, seed):
|
46 |
-
#def generate_response(prompt, history):
|
47 |
messages = create_history_messages(history)
|
48 |
messages.append(
|
49 |
{
|
@@ -57,7 +58,6 @@ def generate_response(prompt, history, model, temperature, max_tokens, top_p, se
|
|
57 |
"content": prompt
|
58 |
}
|
59 |
)
|
60 |
-
#print(messages)
|
61 |
|
62 |
if seed == 0:
|
63 |
seed = random.randint(1, 100000)
|
@@ -67,12 +67,8 @@ def generate_response(prompt, history, model, temperature, max_tokens, top_p, se
|
|
67 |
model=model,
|
68 |
temperature=temperature, # Ensure temperature is a float
|
69 |
max_tokens=max_tokens, # Ensure max_tokens is an integer
|
70 |
-
#temperature=0.3,
|
71 |
-
#max_tokens=8192,
|
72 |
-
#top_p=1,
|
73 |
top_p=top_p, # Ensure top_p is a float
|
74 |
seed=seed, # Ensure seed is an integer
|
75 |
-
#seed=42,
|
76 |
stream=True,
|
77 |
stop=None,
|
78 |
)
|
@@ -84,11 +80,10 @@ def generate_response(prompt, history, model, temperature, max_tokens, top_p, se
|
|
84 |
response += delta_content
|
85 |
yield response
|
86 |
|
87 |
-
#Write to File
|
88 |
write_to_file(prompt, response)
|
89 |
return response
|
90 |
|
91 |
-
|
92 |
additional_inputs = [
|
93 |
gr.Dropdown(choices=["llama3-70b-8192"], value="llama3-70b-8192", label="Model"),
|
94 |
gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.3, label="Temperature",
|
@@ -99,7 +94,6 @@ additional_inputs = [
|
|
99 |
info="A method of text generation where a model will only consider the most probable next tokens that make up the probability p."),
|
100 |
gr.Number(precision=0, value=42, label="Seed",
|
101 |
info="A starting point to initiate generation, use 0 for random")
|
102 |
-
# Seed=42 works really good with great answers....also 37
|
103 |
]
|
104 |
|
105 |
# CSS to remove the Gradio footer
|
@@ -109,7 +103,6 @@ footer {
|
|
109 |
}
|
110 |
"""
|
111 |
|
112 |
-
|
113 |
demo = gr.ChatInterface(
|
114 |
fn=generate_response,
|
115 |
chatbot=gr.Chatbot(show_label=False, show_share_button=True,
|
@@ -121,8 +114,10 @@ demo = gr.ChatInterface(
|
|
121 |
)
|
122 |
|
123 |
if __name__ == "__main__":
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
|
|
|
|
|
1 |
#####################################
|
2 |
#
|
3 |
+
# Implemented a 5G Expert Assistant
|
4 |
#
|
5 |
# Features (Version):
|
6 |
# 1. Multi-model selection, context window selection
|
7 |
# 2. Streaming support
|
8 |
# 3.
|
9 |
+
#####################################
|
|
|
10 |
|
11 |
import os
|
12 |
import random
|
13 |
+
import logging
|
14 |
import gradio as gr
|
15 |
from groq import Groq
|
16 |
|
17 |
+
# Configure logging settings
|
18 |
+
logging.basicConfig(
|
19 |
+
filename='app.log', # Specify the file name
|
20 |
+
level=logging.INFO, # Set the log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
|
21 |
+
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
22 |
+
)
|
23 |
|
24 |
+
# Create a logger object
|
25 |
+
logger = logging.getLogger(__name__)
|
26 |
+
|
27 |
+
client = Groq(api_key=os.environ.get("Groq_Api_Key"))
|
28 |
|
29 |
def create_history_messages(history):
|
30 |
history_messages = [{"role": "user", "content": m[0]} for m in history]
|
|
|
32 |
[{"role": "assistant", "content": m[1]} for m in history])
|
33 |
return history_messages
|
34 |
|
|
|
35 |
def write_to_file(prompt, response):
|
36 |
+
logger.info(f"User Prompt: {prompt}")
|
37 |
+
logger.info(f"Response: {response}")
|
|
|
|
|
|
|
|
|
|
|
|
|
38 |
|
39 |
system_prompt = """
|
40 |
5G Expert is designed to be a professional and focused assistant, dedicated to providing deep technical training on 5G and related technologies around cellular networks and IT for Telcos. It maintains a strictly professional demeanor, ensuring that all interactions and explanations are precise, factual, and relevant to 5G. The guide will not entertain general inquiries unrelated to 5G, keeping the focus sharply on topics within its area of expertise. This professional approach ensures that users receive highly relevant and accurate information, fostering an environment conducive to learning and understanding the complexities of 5G technology. Try to mention references or provide citations to make it more detail-oriented.
|
|
|
45 |
"""
|
46 |
|
47 |
def generate_response(prompt, history, model, temperature, max_tokens, top_p, seed):
|
|
|
48 |
messages = create_history_messages(history)
|
49 |
messages.append(
|
50 |
{
|
|
|
58 |
"content": prompt
|
59 |
}
|
60 |
)
|
|
|
61 |
|
62 |
if seed == 0:
|
63 |
seed = random.randint(1, 100000)
|
|
|
67 |
model=model,
|
68 |
temperature=temperature, # Ensure temperature is a float
|
69 |
max_tokens=max_tokens, # Ensure max_tokens is an integer
|
|
|
|
|
|
|
70 |
top_p=top_p, # Ensure top_p is a float
|
71 |
seed=seed, # Ensure seed is an integer
|
|
|
72 |
stream=True,
|
73 |
stop=None,
|
74 |
)
|
|
|
80 |
response += delta_content
|
81 |
yield response
|
82 |
|
83 |
+
# Write to File
|
84 |
write_to_file(prompt, response)
|
85 |
return response
|
86 |
|
|
|
87 |
additional_inputs = [
|
88 |
gr.Dropdown(choices=["llama3-70b-8192"], value="llama3-70b-8192", label="Model"),
|
89 |
gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.3, label="Temperature",
|
|
|
94 |
info="A method of text generation where a model will only consider the most probable next tokens that make up the probability p."),
|
95 |
gr.Number(precision=0, value=42, label="Seed",
|
96 |
info="A starting point to initiate generation, use 0 for random")
|
|
|
97 |
]
|
98 |
|
99 |
# CSS to remove the Gradio footer
|
|
|
103 |
}
|
104 |
"""
|
105 |
|
|
|
106 |
demo = gr.ChatInterface(
|
107 |
fn=generate_response,
|
108 |
chatbot=gr.Chatbot(show_label=False, show_share_button=True,
|
|
|
114 |
)
|
115 |
|
116 |
if __name__ == "__main__":
|
117 |
+
logger.info("Application started")
|
118 |
+
try:
|
119 |
+
demo.queue()
|
120 |
+
demo.launch(share=True)
|
121 |
+
except Exception as e:
|
122 |
+
logger.error(f"An error occurred: {e}")
|
123 |
+
logger.info("Application finished")
|