likewendy committed
Commit 865e55c · 1 Parent(s): fef3078
Files changed (2)
  1. app.py +51 -33
  2. gitignore.txt +1 -0
app.py CHANGED
@@ -1,12 +1,32 @@
+import spaces
 import gradio as gr
-from huggingface_hub import InferenceClient
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+import os
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+os.system('huggingface-cli download matteogeniaccio/phi-4 --local-dir ./phi-4 --include "phi-4/*"')
 
+# Load the phi-4 model and tokenizer
+torch.random.manual_seed(0)
 
+model = AutoModelForCausalLM.from_pretrained(
+    "./phi-4",               # model path
+    device_map="cuda",       # use the GPU
+    torch_dtype="auto",      # select the dtype automatically
+    trust_remote_code=True,  # allow loading remote code
+)
+tokenizer = AutoTokenizer.from_pretrained("./phi-4")
+
+# Set up the pipeline
+
+pipe = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+)
+
+# Response function
+@spaces.GPU
 def respond(
     message,
     history: list[tuple[str, str]],
@@ -15,34 +35,36 @@ def respond(
     temperature,
     top_p,
 ):
+    # Build the message list
     messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
+    for user_msg, assistant_msg in history:
+        if user_msg:
+            messages.append({"role": "user", "content": user_msg})
+        if assistant_msg:
+            messages.append({"role": "assistant", "content": assistant_msg})
     messages.append({"role": "user", "content": message})
 
-    response = ""
+    # Convert the messages into a single string (for text-generation)
+    input_text = "\n".join(
+        f"{msg['role']}: {msg['content']}" for msg in messages
+    )
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-        response += token
-        yield response
+    # Generate the response
+    generation_args = {
+        "max_new_tokens": max_tokens,
+        "temperature": temperature,
+        "top_p": top_p,
+        "do_sample": temperature > 0,
+        "return_full_text": False,
+    }
+    output = pipe(input_text, **generation_args)
+    response = output[0]["generated_text"]
 
+    # Stream the response back, accumulating so the chat shows the growing reply
+    for i in range(len(response)):
+        yield response[: i + 1]
 
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
+# Gradio interface
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
@@ -51,14 +73,10 @@ demo = gr.ChatInterface(
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
             minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
+            maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"
        ),
     ],
 )
 
-
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
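A note on the prompt construction in `respond`: joining the messages as plain `role: content` lines works, but chat-tuned models generally behave better when prompted through their own chat template. A minimal sketch of that variant, assuming the downloaded phi-4 tokenizer ships a chat template (not verified in this commit):

    # Hypothetical alternative to the manual "\n".join(...) prompt above
    input_text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,              # return a prompt string rather than token ids
        add_generation_prompt=True,  # append the marker that cues the assistant's turn
    )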
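Also worth noting: the generator in `respond` only replays text that has already been fully generated, so the streaming in the UI is cosmetic. True token-by-token streaming is possible with `transformers`' `TextIteratorStreamer`; a sketch against the same `model` and `tokenizer`, where `respond_streaming` is a hypothetical replacement and not part of this commit:

    from threading import Thread
    from transformers import TextIteratorStreamer

    def respond_streaming(input_text, max_tokens, temperature, top_p):
        # Decode and yield text chunks while generate() is still running
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
        inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
        kwargs = dict(
            **inputs,
            streamer=streamer,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            do_sample=temperature > 0,
        )
        Thread(target=model.generate, kwargs=kwargs).start()  # run generation in the background
        partial = ""
        for chunk in streamer:
            partial += chunk
            yield partial  # gr.ChatInterface expects the message-so-far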
gitignore.txt ADDED
@@ -0,0 +1 @@
+phi-4
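Two remarks on this file: git only reads ignore rules from a file named `.gitignore`, so `gitignore.txt` will not actually keep the downloaded `phi-4` directory out of the repository. Separately, the `os.system('huggingface-cli download ...')` call in app.py can be done in-process instead; a sketch assuming `huggingface_hub` is available (it is a dependency of `transformers`):

    from huggingface_hub import snapshot_download

    # Mirror of the CLI call in app.py: fetch the phi-4 snapshot into ./phi-4
    snapshot_download(
        repo_id="matteogeniaccio/phi-4",
        local_dir="./phi-4",
        allow_patterns=["phi-4/*"],  # same filter as the CLI --include flag
    )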