Tonic committed
Commit b929c63 β€’ 1 Parent(s): 90c1879

Create app.py

Files changed (1)
  1. app.py +163 -0
app.py ADDED
@@ -0,0 +1,163 @@
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import torch
+ import os
+ import copy
+ import re
+ import secrets
+ import tempfile
+ from pathlib import Path
+ from pydub import AudioSegment
+
+ # Initialize the model and tokenizer
+ torch.manual_seed(420)
+ tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-Audio-Chat", trust_remote_code=True)
+ model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-Audio-Chat", device_map="cuda", trust_remote_code=True).eval()
+
+ # predict() writes temporary audio clips under this directory; fall back to the
+ # system temp dir when GRADIO_TEMP_DIR is not set.
+ uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str(Path(tempfile.gettempdir()) / "gradio")
+
+ # Convert chat text to HTML for the chatbot display: wrap ``` fences in
+ # <pre><code> blocks and escape special characters inside code blocks.
+ def _parse_text(text):
+     lines = text.split("\n")
+     lines = [line for line in lines if line != ""]
+     count = 0
+     for i, line in enumerate(lines):
+         if "```" in line:
+             count += 1
+             items = line.split("`")
+             if count % 2 == 1:
+                 lines[i] = f'<pre><code class="language-{items[-1]}">'
+             else:
+                 lines[i] = f"<br></code></pre>"
+         else:
+             if i > 0:
+                 if count % 2 == 1:
+                     line = line.replace("`", r"\`")
+                     line = line.replace("<", "&lt;")
+                     line = line.replace(">", "&gt;")
+                     line = line.replace(" ", "&nbsp;")
+                     line = line.replace("*", "&ast;")
+                     line = line.replace("_", "&lowbar;")
+                     line = line.replace("-", "&#45;")
+                     line = line.replace(".", "&#46;")
+                     line = line.replace("!", "&#33;")
+                     line = line.replace("(", "&#40;")
+                     line = line.replace(")", "&#41;")
+                     line = line.replace("$", "&#36;")
+                 lines[i] = "<br>" + line
+     text = "".join(lines)
+     return text
+
+ def predict(_chatbot, task_history):
+     # Run one chat turn: build the multimodal history, query the model, and
+     # attach clipped audio segments when the response contains timestamps.
+     if not task_history:
+         return _chatbot
+
+     query = task_history[-1][0]
+     history_cp = copy.deepcopy(task_history)
+     history_filter = []
+     audio_idx = 1
+     pre = ""
+     last_audio = None
+
+     for i, (q, a) in enumerate(history_cp):
+         if isinstance(q, (tuple, list)):
+             last_audio = q[0]
+             q = f'Audio {audio_idx}: <audio>{q[0]}</audio>'
+             pre += q + '\n'
+             audio_idx += 1
+         else:
+             pre += q
+             history_filter.append((pre, a))
+             pre = ""
+
+     history, message = history_filter[:-1], history_filter[-1][0]
+     response, history = model.chat(tokenizer, message, history=history)
+
+     ts_pattern = r"<\|\d{1,2}\.\d+\|>"
+     all_time_stamps = re.findall(ts_pattern, response)
+     if (len(all_time_stamps) > 0) and (len(all_time_stamps) % 2 == 0) and last_audio:
+         ts_float = [float(t.replace("<|", "").replace("|>", "")) for t in all_time_stamps]
+         ts_float_pair = [ts_float[i:i + 2] for i in range(0, len(all_time_stamps), 2)]
+         # Read the audio file
+         format = os.path.splitext(last_audio)[-1].replace(".", "")
+         audio_file = AudioSegment.from_file(last_audio, format=format)
+         chat_response_t = response.replace("<|", "").replace("|>", "")
+         chat_response = chat_response_t
+         temp_dir = secrets.token_hex(20)
+         temp_dir = Path(uploaded_file_dir) / temp_dir
+         temp_dir.mkdir(exist_ok=True, parents=True)
+         # Cut the audio file at each (start, end) timestamp pair
+         for pair in ts_float_pair:
+             audio_clip = audio_file[pair[0] * 1000: pair[1] * 1000]
+             # Save the audio clip
+             name = f"tmp{secrets.token_hex(5)}.{format}"
+             filename = temp_dir / name
+             audio_clip.export(filename, format=format)
+             _chatbot[-1] = (_parse_text(query), chat_response)
+             _chatbot.append((None, (str(filename),)))
+     else:
+         _chatbot[-1] = (_parse_text(query), response)
+     full_response = _parse_text(response)
+     task_history[-1] = (query, full_response)
+     print("Qwen-Audio-Chat: " + _parse_text(full_response))
+     return _chatbot
+
+ def regenerate(_chatbot, task_history):
+     if not task_history:
+         return _chatbot
+     item = task_history[-1]
+     if item[1] is None:
+         return _chatbot
+     task_history[-1] = (item[0], None)
+     chatbot_item = _chatbot.pop(-1)
+     if chatbot_item[0] is None:
+         _chatbot[-1] = (_chatbot[-1][0], None)
+     else:
+         _chatbot.append((chatbot_item[0], None))
+     return predict(_chatbot, task_history)
+
+ def add_text(history, task_history, text):
+     history = history + [(_parse_text(text), None)]
+     task_history = task_history + [(text, None)]
+     return history, task_history, ""
+
+ def add_file(history, task_history, file):
+     history = history + [((file.name,), None)]
+     task_history = task_history + [((file.name,), None)]
+     return history, task_history
+
+ def add_mic(history, task_history, file):
+     if file is None:
+         return history, task_history
+     os.rename(file, file + '.wav')
+     print("add_mic file:", file)
+     print("add_mic history:", history)
+     print("add_mic task_history:", task_history)
+     # history = history + [((file.name,), None)]
+     # task_history = task_history + [((file.name,), None)]
+     task_history = task_history + [((file + '.wav',), None)]
+     history = history + [((file + '.wav',), None)]
+     print("task_history", task_history)
+     return history, task_history
+
+ def reset_user_input():
+     return gr.update(value="")
+
+ def reset_state(task_history):
+     task_history.clear()
+     return []
+
+ # Wire the chat helpers defined above into a gr.Blocks UI; Gradio 3.x
+ # component APIs (gr.Audio(source=...), gr.UploadButton) are assumed here.
+ with gr.Blocks() as demo:
+     gr.Markdown("# Audio-Text Interaction Model")
+     gr.Markdown("This model can process an audio input along with a text query and provide a response.")
+     chatbot = gr.Chatbot(label="Qwen-Audio-Chat")
+     query = gr.Textbox(lines=2, label="Text Query")
+     task_history = gr.State([])
+     mic = gr.Audio(source="microphone", type="filepath", label="Audio Input")
+
+     with gr.Row():
+         submit_btn = gr.Button("Submit")
+         regen_btn = gr.Button("Regenerate")
+         addfile_btn = gr.UploadButton("Upload Audio", file_types=["audio"])
+         clear_btn = gr.Button("Clear History")
+
+     mic.change(add_mic, [chatbot, task_history, mic], [chatbot, task_history])
+     submit_btn.click(add_text, [chatbot, task_history, query], [chatbot, task_history, query]).then(
+         predict, [chatbot, task_history], [chatbot], show_progress=True
+     )
+     regen_btn.click(regenerate, [chatbot, task_history], [chatbot], show_progress=True)
+     addfile_btn.upload(add_file, [chatbot, task_history, addfile_btn], [chatbot, task_history], show_progress=True)
+     clear_btn.click(reset_state, [task_history], [chatbot], show_progress=True)
+
+ demo.launch()
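
For reference, the clipping step in predict() works by pairing Qwen-Audio's <|start|><|end|> timestamp markers into (start, end) ranges before cutting the audio with pydub. A minimal standalone sketch of that pairing step, using the same regex as in predict(); the sample response string below is invented purely for illustration:

import re

# Hypothetical model output containing two timestamp pairs (illustrative only)
response = "The word appears at <|1.2|><|2.5|> and again at <|7.0|><|8.4|>."

ts_pattern = r"<\|\d{1,2}\.\d+\|>"                 # same pattern used in predict()
stamps = re.findall(ts_pattern, response)          # ['<|1.2|>', '<|2.5|>', '<|7.0|>', '<|8.4|>']
values = [float(t.strip("<|>")) for t in stamps]   # [1.2, 2.5, 7.0, 8.4]
pairs = [values[i:i + 2] for i in range(0, len(values), 2)]
print(pairs)                                       # [[1.2, 2.5], [7.0, 8.4]]
# Each [start, end] pair is in seconds; predict() slices the pydub segment as
# audio_file[start * 1000: end * 1000] (pydub indexes in milliseconds).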