vericudebuget committed
Commit ec4d6e3 · verified · 1 Parent(s): 88b6b78

Update app.py

Files changed (1)
  1. app.py +10 -43
app.py CHANGED
@@ -1,19 +1,11 @@
  from huggingface_hub import InferenceClient
  import gradio as gr
  import datetime
- import re
 
  # Initialize the InferenceClient
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
- # Define the system prompt templates
- system_prompt_templates = {
-     r"\btime\b|\bhour\b|\bclock\b": "server log: ~This message was sent at {formatted_time}.~",
-     r"\bdate\b|\bcalendar\b": "server log: ~Today's date is {formatted_date}.~",
- }
-
-
- def format_prompt(message, history, system_prompt):
+ def format_prompt(message, history):
      prompt = "<s>"
      for user_prompt, bot_response in history:
          prompt += f"[INST] {user_prompt} [/INST]"
@@ -33,16 +25,10 @@ def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=904
          seed=42,
      )
 
-     # Get current time and date
+     # Get current time
      now = datetime.datetime.now()
      formatted_time = now.strftime("%H.%M.%S, %B, %Y")
-     formatted_date = now.strftime("%B %d, %Y")
-
-     # Check for keywords in the user's input and update the system prompt accordingly
-     for keyword, template in system_prompt_templates.items():
-         if re.search(keyword, prompt, re.IGNORECASE):
-             system_prompt = template.format(formatted_time=formatted_time, formatted_date=formatted_date)
-             break
+     system_prompt = f"server log: ~ignore unless asked{formatted_time}.~"
 
      formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
      stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
@@ -59,29 +45,10 @@ additional_inputs = [
      gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
  ]
 
- def check_keywords(text):
-     for keyword, _ in system_prompt_templates.items():
-         if re.search(keyword, text, re.IGNORECASE):
-             return True
-     return False
-
- chatbot = gr.Chatbot(show_label=True, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
- with gr.Blocks():
-     with gr.Row():
-         with gr.Column(scale=3):
-             user_input = gr.Textbox(label="Your message", placeholder="Type your message here...")
-         with gr.Column(scale=1):
-             submit_button = gr.Button("Send")
-
-     with gr.Row():
-         chatbot_output = chatbot
-
-     submit_button.click(
-         fn=generate,
-         inputs=[user_input, chatbot, gr.Textbox(label="System Prompt", max_lines=1, interactive=True)],
-         outputs=chatbot_output,
-         every=200,
-         _js="check_keywords"
-     )
-
- gr.Blocks().launch(show_api=False)
+ gr.ChatInterface(
+     fn=generate,
+     chatbot=gr.Chatbot(show_label=True, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+     additional_inputs=additional_inputs,
+     title="ConvoLite",
+     concurrency_limit=20,
+ ).launch(show_api=False)
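
In short, the commit removes the regex keyword routing (and the unused re import), hard-codes a timestamp system prompt inside generate, and swaps the hand-built gr.Blocks layout for gr.ChatInterface. A minimal sketch of the resulting prompt construction is below; the body of format_prompt past the lines shown in the diff is assumed to follow the usual Mixtral-Instruct turn pattern, so treat it as illustration rather than the exact file contents.

import datetime

def format_prompt(message, history):
    # Mixtral-Instruct style prompt: each past turn becomes
    # "[INST] user [/INST] assistant</s>", then the new message is appended.
    # The loop continuation and final turn are assumed; the diff only shows
    # the prompt initialisation and the start of the loop.
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

# After this commit the system prompt is no longer picked by regex keyword
# matching; generate always prepends the current timestamp, marked so the
# model ignores it unless the user asks about the time.
now = datetime.datetime.now()
formatted_time = now.strftime("%H.%M.%S, %B, %Y")
system_prompt = f"server log: ~ignore unless asked{formatted_time}.~"

print(format_prompt(f"{system_prompt}, What time is it?", history=[]))
# e.g. <s>[INST] server log: ~ignore unless asked14.32.05, March, 2024.~, What time is it? [/INST]

gr.ChatInterface then takes over the wiring that the deleted Blocks/Button code did by hand: it passes the message, the chat history, and the values of additional_inputs into generate and streams the yielded text back into the chatbot.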