emeses committed
Commit a677db4 · 1 Parent(s): 1dee68b

Update space

Files changed (2)
  1. app.py +47 -56
  2. requirements.txt +0 -1
app.py CHANGED
@@ -1,73 +1,64 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
-from typing import List, Dict
 
-# Initialize client with your model
-client = InferenceClient("ivwhy/lora_model")
+"""
+For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+"""
+client = InferenceClient("emeses/lab2_model")
+
 
 def respond(
-    message: str,
-    history: List[Dict[str, str]],
-    system_message: str = "You are a helpful assistant.",
-    max_tokens: int = 512,
-    temperature: float = 0.7,
-    top_p: float = 0.95,
-) -> str:
-    # Format conversation using new messages format
-    conversation = system_message + "\n\n"
-    for msg in history:
-        role = msg["role"]
-        content = msg["content"]
-        conversation += f"{role.capitalize()}: {content}\n"
-    conversation += f"User: {message}\nAssistant: "
+    message,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
+    messages = [{"role": "system", "content": system_message}]
+
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+
+    messages.append({"role": "user", "content": message})
 
-    # Stream response
     response = ""
-    try:
-        for token in client.text_generation(
-            conversation,
-            max_new_tokens=max_tokens,
-            temperature=temperature,
-            top_p=top_p,
-            stream=True,
-        ):
-            response += token
-            yield response
-    except Exception as e:
-        yield f"Error: {str(e)}"
 
-# Create Gradio interface
+    for message in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    ):
+        token = message.choices[0].delta.content
+
+        response += token
+        yield response
+
+
+"""
+For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+"""
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(
-            value="You are a helpful assistant.",
-            label="System message"
-        ),
-        gr.Slider(
-            minimum=1,
-            maximum=2048,
-            value=512,
-            step=1,
-            label="Max tokens"
-        ),
+        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
-            minimum=0.1,
-            maximum=2.0,
-            value=0.7,
-            step=0.1,
-            label="Temperature"
-        ),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p"
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)",
         ),
     ],
-    title="AI Assistant",
 )
 
+
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
requirements.txt CHANGED
@@ -1,3 +1,2 @@
 huggingface_hub==0.25.2
 gradio>=3.50.0
-typing-extensions>=4.5.0
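
The updated app.py drops the hand-built prompt string and `client.text_generation` call in favour of `InferenceClient.chat_completion`, streaming message deltas straight into the Gradio chat. Below is a minimal sketch of the same streaming loop run outside Gradio, assuming the `emeses/lab2_model` endpoint is live and chat-capable; the prompt text and the guard on empty delta content are illustrative assumptions (the final stream chunk may carry no text).

# Sketch: consume the chat_completion stream the same way respond() does,
# but from a plain script. Model name and prompt are illustrative only.
from huggingface_hub import InferenceClient

client = InferenceClient("emeses/lab2_model")

messages = [
    {"role": "system", "content": "You are a friendly Chatbot."},
    {"role": "user", "content": "Say hello in one short sentence."},
]

response = ""
for chunk in client.chat_completion(
    messages,
    max_tokens=64,
    stream=True,
    temperature=0.7,
    top_p=0.95,
):
    token = chunk.choices[0].delta.content
    if token:  # assumption: skip chunks whose delta carries no text
        response += token

print(response)

Each streamed chunk carries only the newly generated delta, which is why respond() accumulates into response before yielding, so ChatInterface re-renders the full partial reply on every update.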