Update app.py
app.py CHANGED
@@ -7,7 +7,7 @@ client = InferenceClient("google/gemma-2-27b-it")
 
 def generate_text(messages):
     generated = ""
-    for token in client.chat_completion(messages, max_tokens=
+    for token in client.chat_completion(messages, max_tokens=50,stream=True):
         content = (token.choices[0].delta.content)
         generated+=content
         print(generated)
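For context: with stream=True, huggingface_hub's InferenceClient.chat_completion yields chunks whose choices[0].delta.content carry the incremental text. A minimal standalone sketch of the same pattern (the empty-delta guard and the stream_reply name are mine, not part of the commit):

from huggingface_hub import InferenceClient

client = InferenceClient("google/gemma-2-27b-it")

def stream_reply(messages, max_tokens=50):
    # Accumulate streamed delta chunks into a single string.
    generated = ""
    for chunk in client.chat_completion(messages, max_tokens=max_tokens, stream=True):
        delta = chunk.choices[0].delta.content
        if delta:  # some chunks may carry no text
            generated += delta
    return generated

# Hypothetical usage:
# print(stream_reply([{"role": "user", "content": "Hello!"}]))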
@@ -40,11 +40,17 @@ head = '''
 '''
 with gr.Blocks(title="LLM with TTS",head=head) as demo:
     gr.Markdown("## Please be patient, the first response may have a delay of up to 20 seconds while loading.")
-    gr.Markdown("**
+    gr.Markdown("**gemma-2-27b-it/LJSpeech**.LLM and TTS models will change without notice.")
+
     js = """
+    function replaceSpecialChars(text) {
+        const pattern = /[^a-zA-Z0-9,.!?-_']/g;
+        return text.replace(pattern, ' ');
+    }
+
     function(chatbot){
         text = (chatbot[chatbot.length -1])["content"]
-        window.MatchaTTSEn(text)
+        window.MatchaTTSEn(replaceSpecialChars(text))
     }
     """
     chatbot = gr.Chatbot(type="messages")
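The new replaceSpecialChars helper blanks out characters outside a small allow-list before the text is handed to window.MatchaTTSEn, so the TTS model only sees plain alphanumerics and basic punctuation. A rough Python equivalent of that filter, for illustration only (the app runs it client-side as JavaScript; the character class is slightly simplified here, with the hyphen escaped so it reads as a literal):

import re

# Anything outside letters, digits and basic punctuation becomes a space.
_ALLOWED = re.compile(r"[^a-zA-Z0-9,.!?\-_']")

def replace_special_chars(text: str) -> str:
    return _ALLOWED.sub(" ", text)

# Example: replace_special_chars("Hello, <b>world</b>!") blanks the markup
# characters and keeps the readable text and punctuation.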