Update app.py
Browse files
app.py
CHANGED
@@ -1,21 +1,28 @@
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient
|
|
|
3 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
4 |
"""
|
5 |
-
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
|
6 |
-
"""
|
7 |
-
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
8 |
|
9 |
|
10 |
def respond(
|
11 |
message,
|
12 |
history: list[tuple[str, str]],
|
13 |
-
system_message,
|
14 |
max_tokens,
|
15 |
temperature,
|
16 |
top_p,
|
17 |
):
|
18 |
-
messages = [{"role": "system", "content":
|
19 |
|
20 |
for val in history:
|
21 |
if val[0]:
|
@@ -46,7 +53,6 @@ For information on how to customize the ChatInterface, peruse the gradio docs: h
|
|
46 |
demo = gr.ChatInterface(
|
47 |
respond,
|
48 |
additional_inputs=[
|
49 |
-
gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
|
50 |
gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
|
51 |
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
|
52 |
gr.Slider(
|
|
|
import gradio as gr
from huggingface_hub import InferenceClient

# Serverless HF Inference API client; all chat completions are routed to
# this code-oriented instruct model.
client = InferenceClient("Qwen/Qwen2.5-Coder-7B-Instruct")

# System prompt prepended to every conversation.
# NOTE(review): the identifier is misspelled ("promt"); it is kept as-is
# because respond() references the constant under this exact name.
promt = """
You are a knowledgeable assistant specializing in Java programming language. Your task is to answer questions about Java, including its syntax, features, libraries, best practices, and common issues. Provide clear and concise responses, and when applicable, include code examples to illustrate your explanations.

When answering questions, keep in mind the following guidelines:

Ensure accuracy and clarity in your explanations.
Provide context and background information when necessary.
Use appropriate Java terminology.
Present code snippets formatted for readability.
If a question is ambiguous, ask for clarification.
"""
|
|
|
|
|
|
|
16 |
|
17 |
|
18 |
def respond(
|
19 |
message,
|
20 |
history: list[tuple[str, str]],
|
|
|
21 |
max_tokens,
|
22 |
temperature,
|
23 |
top_p,
|
24 |
):
|
25 |
+
messages = [{"role": "system", "content": promt}]
|
26 |
|
27 |
for val in history:
|
28 |
if val[0]:
|
|
|
53 |
demo = gr.ChatInterface(
|
54 |
respond,
|
55 |
additional_inputs=[
|
|
|
56 |
gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
|
57 |
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
|
58 |
gr.Slider(
|