gorkemgoknar committed
Commit b53f7c7
0 Parent(s):

Duplicate from gorkemgoknar/movie_chat_gpt_yourtts


Co-authored-by: Gorkem Goknar <gorkemgoknar@users.noreply.huggingface.co>

Files changed (5)
  1. .gitattributes +27 -0
  2. README.md +38 -0
  3. app.py +220 -0
  4. dragon.wav +0 -0
  5. requirements.txt +5 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,38 @@
+ ---
+ title: Moviechatbot - GPT chatbot with Coqui YourTTS
+ emoji: 👀
+ colorFrom: blue
+ colorTo: indigo
+ sdk: gradio
+ app_file: app.py
+ pinned: false
+ duplicated_from: gorkemgoknar/movie_chat_gpt_yourtts
+ ---
+
+ # Configuration
+
+ `title`: _string_
+ Display title for the Space
+
+ `emoji`: _string_
+ Space emoji (emoji-only character allowed)
+
+ `colorFrom`: _string_
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+ `colorTo`: _string_
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+ `sdk`: _string_
+ Can be either `gradio` or `streamlit`
+
+ `sdk_version` : _string_
+ Only applicable for `streamlit` SDK.
+ See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
+
+ `app_file`: _string_
+ Path to your main application file (which contains either `gradio` or `streamlit` Python code).
+ Path is relative to the root of the repository.
+
+ `pinned`: _boolean_
+ Whether the Space stays on top of your list.
app.py ADDED
@@ -0,0 +1,220 @@
+ import gradio as gr
+ import random
+ import torch
+ from transformers import AutoConfig, AutoTokenizer, AutoModelWithLMHead
+ from transformers import GPT2Tokenizer, GPT2LMHeadModel
+ from itertools import chain
+
+ import os
+
+ import tempfile
+ from typing import Optional
+ from TTS.config import load_config
+ import numpy as np
+ from TTS.utils.manage import ModelManager
+ from TTS.utils.synthesizer import Synthesizer
+
+ #emotion_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-emotion")
+ #emotion_model = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-emotion")
+
+ def get_emotion(text):  # unused here; as written it uses the GPT-2 model/tokenizer below, not the commented-out emotion model
+     input_ids = tokenizer.encode(text + '</s>', return_tensors='pt')
+     output = model.generate(input_ids=input_ids, max_length=2)
+     dec = [tokenizer.decode(ids) for ids in output]
+     label = dec[0]
+     return label.split()[1]
+
+
+ config = AutoConfig.from_pretrained('gorkemgoknar/gpt2chatbotenglish')
+ model = GPT2LMHeadModel.from_pretrained('gorkemgoknar/gpt2chatbotenglish', config=config)
+
+ tokenizer = GPT2Tokenizer.from_pretrained('gorkemgoknar/gpt2chatbotenglish')
+ tokenizer.model_max_length = 1024
+
+ #Dynamic Temperature
+ #See experiment https://www.linkedin.com/pulse/ai-goes-job-interview-g%25C3%25B6rkem-g%25C3%25B6knar
+
+ base_temperature = 1.2
+ dynamic_temperature_range = 0.15
+
+ rand_range = random.uniform(-1 * dynamic_temperature_range, dynamic_temperature_range)
+ temperature = base_temperature + rand_range
+
+ SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
+
+ #See document for experiment https://www.linkedin.com/pulse/ai-goes-job-interview-g%C3%B6rkem-g%C3%B6knar/
+
+ def get_chat_response(name, history=[], input_txt="Hello , what is your name?"):
+
+     ai_history = history.copy()
+
+     #ai_history.append(input_txt)
+     ai_history_e = [tokenizer.encode(e) for e in ai_history]
+
+     personality = "My name is " + name
+
+     bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])
+
+     #persona first, history next, input text must be at the end
+     #[[bos, persona] , [history] , [input]]
+     sequence = [[bos] + tokenizer.encode(personality)] + ai_history_e + [tokenizer.encode(input_txt)]
+     ##[[bos, persona] , [speaker1 .., speaker2 .., speaker1 ..., speaker2 ...] , [input]]
+     sequence = [sequence[0]] + [[speaker2 if (len(sequence)-i) % 2 else speaker1] + s for i, s in enumerate(sequence[1:])]
+
+     sequence = list(chain(*sequence))
+
+     #bot_input_ids = tokenizer.encode(personality + tokenizer.eos_token + input_txt + tokenizer.eos_token , return_tensors='pt')
+     sequence_len = len(sequence)
+
+     #optimum response and speed
+     chat_history_ids = model.generate(
+         torch.tensor(sequence).unsqueeze(0), max_length=50,
+         pad_token_id=tokenizer.eos_token_id,
+         no_repeat_ngram_size=3,
+         do_sample=True,
+         top_k=60,
+         top_p=0.8,
+         temperature=1.3  # fixed value; the dynamic `temperature` computed above is not used here
+     )
+     out_str = tokenizer.decode(chat_history_ids[0][sequence_len:], skip_special_tokens=True)
+     #out_str = tokenizer.decode(chat_history_ids[:, sequence.shape[-1]:][0], skip_special_tokens=False)
+     return out_str
+
+ ##you can use any one of the characters below
+ '''
+ | Macleod | Moran | Brenda | Ramirez | Peter Parker | Quentin Beck | Andy
+ | Red | Norton | Willard | Chief | Chef | Kilgore | Kurtz | Westley | Buttercup
+ | Vizzini | Fezzik | Inigo | Man In Black | Taylor | Zira | Zaius | Cornelius
+ | Bud | Lindsey | Hippy | Erin | Ed | George | Donna | Trinity | Agent Smith
+ | Morpheus | Neo | Tank | Meryl | Truman | Marlon | Christof | Stromboli | Bumstead
+ | Schreber | Walker | Korben | Cornelius | Loc Rhod | Anakin | Obi-Wan | Palpatine
+ | Padme | Superman | Luthor | Dude | Walter | Donny | Maude | General | Starkiller
+ | Indiana | Willie | Short Round | John | Sarah | Terminator | Miller | Sarge | Reiben
+ | Jackson | Upham | Chuckie | Will | Lambeau | Sean | Skylar | Saavik | Spock
+ | Kirk | Bones | Khan | Kirk | Spock | Sybok | Scotty | Bourne | Pamela | Abbott
+ | Nicky | Marshall | Korshunov | Troy | Vig | Archie Gates | Doc | Interrogator
+ | Ellie | Ted | Peter | Drumlin | Joss | Macready | Childs | Nicholas | Conrad
+ | Feingold | Christine | Adam | Barbara | Delia | Lydia | Cathy | Charles | Otho
+ | Schaefer | Han | Luke | Leia | Threepio | Vader | Yoda | Lando | Elaine | Striker
+ | Dr. Rumack | Kramer | David | Saavik | Kirk | Kruge | Holden | Deckard | Rachael
+ | Batty | Sebastian | Sam | Frodo | Pippin | Gandalf | Kay | Edwards | Laurel
+ | Edgar | Zed | Jay | Malloy | Plissken | Steve Rogers | Tony Stark | Scott Lang
+ | Bruce Banner | Bruce | Edward | Two-Face | Batman | Chase | Alfred | Dick
+ | Riddler | Din Djarin | Greef Karga | Kuiil | Ig-11 | Cara Dune | Peli Motto
+ | Toro Calican | Ripley | Meredith | Dickie | Marge | Peter | Lambert | Kane
+ | Dallas | Ripley | Ash | Parker | Threepio | Luke | Leia | Ben | Han | Common Bob
+ | Common Alice | Jack | Tyler | Marla | Dana | Stantz | Venkman | Spengler | Louis
+ | Fry | Johns | Riddick | Kirk | Decker | Spock | Ilia | Indy | Belloq | Marion
+ | Brother | Allnut | Rose | Qui-Gon | Jar Jar
+ '''
+
+ MODEL_NAME = "tts_models/multilingual/multi-dataset/your_tts"
+
+
+
+ def greet(character, your_voice, message, history):
+
+     #gradio's set_state/get_state had problems on embedded html!
+     history = history or {"character": character, "message_history": []}
+     #gradio's set_state/get_state does not persist session, for now using global
+     #global history
+
+     if history["character"] != character:
+         #switching character
+         history = {"character": character, "message_history": []}
+
+
+     response = get_chat_response(character, history=history["message_history"], input_txt=message)
+     os.system('tts --text "'+response+'" --model_name tts_models/multilingual/multi-dataset/your_tts --speaker_wav '+your_voice+' --language_idx "en"')
+
+     history["message_history"].append((message, response))
+
+     #emotion = get_emotion(response)
+
+     html = "<div class='chatbot'>"
+     for user_msg, resp_msg in history["message_history"]:
+         html += f"<div class='user_msg'>You: {user_msg}</div>"
+         html += f"<div class='resp_msg'>{character}: {resp_msg}</div>"
+     html += "</div>"
+
+     return html, history, "tts_output.wav"  # default output path written by the tts CLI
+
+
+ def greet_textonly(character, message, history):
+
+     #gradio's set_state/get_state had problems on embedded html!
+     history = history or {"character": character, "message_history": []}
+     #gradio's set_state/get_state does not persist session, for now using global
+     #global history
+
+     if history["character"] != character:
+         #switching character
+         history = {"character": character, "message_history": []}
+
+
+     response = get_chat_response(character, history=history["message_history"], input_txt=message)
+
+     history["message_history"].append((message, response))
+
+     #emotion = get_emotion(response)
+
+     html = "<div class='chatbot'>"
+     for user_msg, resp_msg in history["message_history"]:
+         html += f"<div class='user_msg'>You: {user_msg}</div>"
+         html += f"<div class='resp_msg'>{character}: {resp_msg}</div>"
+     html += "</div>"
+
+     return html, history
+
+
+ personality_choices = ["Gandalf", "Riddick", "Macleod", "Morpheus", "Neo", "Spock", "Vader", "Indy"]
+
+ examples = ["Gandalf", "What is your name?"]
+
+ css = """
+ .chatbot {display:flex;flex-direction:column}
+ .user_msg, .resp_msg {padding:4px;margin-bottom:4px;border-radius:4px;width:80%}
+ .user_msg {background-color:cornflowerblue;color:white;align-self:start}
+ .resp_msg {background-color:lightgray;align-self:self-end}
+ """
+
+
+ #some selected ones are included for demo use
+ personality_choices = ["Gandalf", "Riddick", "Macleod", "Morpheus", "Neo", "Spock", "Vader", "Indy", "Ig-11", "Threepio", "Tony Stark", "Batman", "Vizzini"]
+ title = "Movie Chatbot with Coqui YourTTS"
+ description = "Chat with your favorite movie characters and make them speak with your voice. Try metayazar.com/chatbot for more movie/character options. See the Coqui Space for more TTS models: https://huggingface.co/spaces/coqui/CoquiTTS"
+ article = "<p style='text-align: center'><a href='https://www.linkedin.com/pulse/ai-goes-job-interview-g%C3%B6rkem-g%C3%B6knar/' target='_blank'>AI Goes to Job Interview</a> | <a href='https://www.metayazar.com/' target='_blank'>Metayazar AI Writer</a> | <a href='https://www.linkedin.com/in/goknar/' target='_blank'>Görkem Göknar</a></p>"
+
+ #History not implemented in this demo, use metayazar.com/chatbot for a movie and character dropdown chat interface
+ ##interface = gr.Interface(fn=greet, inputs=[gr.inputs.Dropdown(personality_choices) ,"text"], title=title, description=description, outputs="text")
+
+ examples = [['Gandalf', 'dragon.wav', 'Who are you sir?', {}]]
+
+ history = {"character": "None", "message_history": []}
+
+ interface_mic = gr.Interface(fn=greet,
+                              inputs=[gr.inputs.Dropdown(personality_choices),
+                                      gr.inputs.Audio(source="microphone", type="filepath"),
+                                      "text",
+                                      "state"],
+                              outputs=["html", "state", gr.outputs.Audio(type="file")],
+                              css=css, title=title, description=description, article=article)
+ interface_file = gr.Interface(fn=greet,
+                               inputs=[gr.inputs.Dropdown(personality_choices),
+                                       gr.inputs.Audio(type="filepath"),
+                                       "text",
+                                       "state"],
+                               outputs=["html", "state", gr.outputs.Audio(type="file")],
+                               css=css, title=title, description=description, article=article)
+
+ interface_text = gr.Interface.load("https://huggingface.co/spaces/gorkemgoknar/moviechatbot",
+                                    src="spaces",
+                                    inputs=[gr.inputs.Dropdown(personality_choices),
+                                            "text",
+                                            "state"],
+                                    outputs=["html", "state"],
+                                    css=css, title="Chat Text Only", description=description, article=article)
+
+ # appinterface = gr.TabbedInterface([interface_mic, interface_file, interface_text], ["Chat with Record", "Chat with File Upload", "Chat Text only"])
+ if __name__ == "__main__":
+     interface_mic.launch()
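
app.py keeps the GPT-2 chat model in memory but shells out to the `tts` command line for every reply, reloading YourTTS each time and picking up the CLI's default `tts_output.wav`. The unused `ModelManager`/`Synthesizer` imports hint at the alternative of keeping the synthesizer loaded in-process. A minimal sketch of that approach, assuming the installed Coqui `tts` package exposes the `TTS.api.TTS` wrapper (not verified against the version pinned here; the CLI call in `greet()` above is what is actually committed):

```python
# Sketch: synthesize a reply with YourTTS through the Coqui Python API instead of os.system.
# Assumes TTS.api.TTS is available in the installed `tts` package.
from TTS.api import TTS

MODEL_NAME = "tts_models/multilingual/multi-dataset/your_tts"
tts_engine = TTS(model_name=MODEL_NAME)  # load once at startup, reuse across requests

def synthesize(text: str, speaker_wav: str, out_path: str = "tts_output.wav") -> str:
    """Clone the voice in `speaker_wav` and write `text` as speech to `out_path`."""
    tts_engine.tts_to_file(text=text, speaker_wav=speaker_wav, language="en", file_path=out_path)
    return out_path
```

Loading the model once avoids the per-request download check and model initialization that the CLI repeats on every call.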
dragon.wav ADDED
Binary file (58.2 kB).
 
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ torch
+ transformers
+ gradio==3.12.0
+ tts
+ numpy