Commit · e3e55df
Parent(s): f8955c9
Create app.py
app.py ADDED
@@ -0,0 +1,49 @@
+import gradio as gr
+from llama_index.llms import Perplexity
+from llama_index.llms.base import ChatMessage
+
+# Define the main function for handling chat interactions.
+def chat_with_pplx_model(api_key, model_name, user_input, pre_prompt, system_message, temperature, max_tokens):
+    # Convert max_tokens to integer if it is not empty. This controls the length of the model's responses.
+    max_tokens = int(max_tokens) if max_tokens else None
+
+    # Prepend the pre-prompt text to the user input. This allows setting context or instructions.
+    full_user_input = f"{pre_prompt}\n{user_input}"
+
+    # Initialize the Perplexity model with the given parameters.
+    llm = Perplexity(
+        api_key=api_key,
+        model=model_name,  # llama_index's Perplexity class takes `model`, not `model_name`
+        temperature=temperature,
+        max_tokens=max_tokens  # If max_tokens is None, the model uses its default value.
+    )
+
+    # Prepare the chat messages for interaction with the model.
+    messages_dict = [
+        {"role": "system", "content": system_message},  # System message, like an initial greeting or instructions.
+        {"role": "user", "content": full_user_input}  # The actual user input, prepended with the pre_prompt.
+    ]
+    messages = [ChatMessage(**msg) for msg in messages_dict]
+
+    # Get the response from the LLM.
+    response = llm.chat(messages)
+    return response.message.content  # llm.chat() returns a ChatResponse; extract the assistant's text for Gradio.
+
+# Gradio Interface components.
+api_key_input = gr.Textbox(label="API Key")  # Input for the API key.
+model_name_dropdown = gr.Dropdown(choices=["pplx-70b-online", "pplx-7b-online", "mixtral-8x7b-instruct"], label="LLM Model Name")  # Input for the model name.
+user_input = gr.Textbox(placeholder="Enter your input here", label="User Input")  # Input for the user's message.
+pre_prompt_input = gr.Textbox(placeholder="Enter pre-prompt here", label="Pre-Prompt")  # Input for the pre-prompt text.
+system_message = gr.Textbox(placeholder="Enter system message here", label="System Message")  # Input for the system message.
+temperature_slider = gr.Slider(minimum=0, maximum=2, step=0.01, label="Temperature")  # Slider to adjust the temperature.
+max_tokens_input = gr.Textbox(placeholder="Enter max tokens (optional)", label="Max Tokens")  # Input for max tokens.
+
+# Creating the Gradio interface.
+iface = gr.Interface(
+    fn=chat_with_pplx_model,
+    inputs=[api_key_input, model_name_dropdown, user_input, pre_prompt_input, system_message, temperature_slider, max_tokens_input],
+    outputs="text"
+)
+
+# Launching the interface.
+iface.launch(share=True)
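
For a quick smoke test outside the Gradio UI, the handler can be called directly. A minimal sketch, assuming a valid Perplexity API key exported as PPLX_API_KEY (the environment variable name and the prompt strings are illustrative, not part of the app):

import os
from app import chat_with_pplx_model

# Hypothetical smoke test; passes the same kinds of values the Gradio form submits.
reply = chat_with_pplx_model(
    api_key=os.environ["PPLX_API_KEY"],  # assumed env var holding your key
    model_name="pplx-7b-online",         # one of the dropdown choices above
    user_input="Name three uses of graphene.",
    pre_prompt="Answer concisely.",
    system_message="You are a helpful assistant.",
    temperature=0.7,
    max_tokens="256",                    # a string, as the Textbox would send it
)
print(reply)

One note on the last line of app.py: Gradio ignores share=True when the app runs on Hugging Face Spaces (the Space itself is already public), so the flag only matters for local runs.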