saad177 committed
Commit 8647b8a
1 Parent(s): 6ab6e01

add chatgpt to interact with output

Files changed (1)
  1. app.py  +46 -4
app.py CHANGED

@@ -1,15 +1,26 @@
from transformers import pipeline
import gradio as gr
from pytube import YouTube
+ import os
+ import openai

model = pipeline(model="SofiaK/checkpoints")

+ openai_api_key = os.getenv("OPENAI_API_KEY")
+
+ # Check if the API key is available
+ if openai_api_key is None:
+     raise ValueError(
+         "OpenAI API key is not set. Make sure to set it as a secret variable in Hugging Face Spaces."
+     )
+
+ openai.api_key = openai_api_key
+

def youtube_to_text(youtube_url):
    video = YouTube(youtube_url).streams.filter(only_audio=True).all()
    audio = video[0].download()
    text = model(audio)["text"]
-     # todo : remove the audio file after the inference
    return text
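Note on the hunk above: the "# todo : remove the audio file after the inference" line is deleted without the cleanup being implemented, so every YouTube transcription still leaves the downloaded audio file on disk. A minimal sketch of how youtube_to_text could clean up after itself (not part of this commit; it relies only on pytube's Stream.download() returning the path of the saved file):

def youtube_to_text(youtube_url):
    # Sketch only: same behaviour as above, plus removal of the temporary file.
    stream = YouTube(youtube_url).streams.filter(only_audio=True).first()
    audio_path = stream.download()
    try:
        return model(audio_path)["text"]
    finally:
        if os.path.exists(audio_path):
            os.remove(audio_path)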
 
@@ -30,6 +41,8 @@ with gr.Blocks() as demo:
        btn = gr.Button("Submit")
    with gr.Column():
        output = gr.Text(label="Model Output")
+         chatbot = gr.Chatbot()
+         msg = gr.Textbox()

    def make_visible(val):
        audio_visible = val == "Audio"
@@ -38,14 +51,43 @@ with gr.Blocks() as demo:
            youtube_input: {"visible": not audio_visible, "__type__": "update"},
        }

+     history_gpt = []
+
+     def respond(msg, chat_history):
+         history_gpt.append({"role": "user", "content": msg})
+         response = openai.chat.completions.create(
+             model="gpt-3.5-turbo", messages=history_gpt
+         )
+         history_gpt.append(
+             {"role": "assistant", "content": response.choices[0].message.content}
+         )
+         chat_history.append((msg, response.choices[0].message.content))
+         return "", chat_history
+
    radio.change(make_visible, inputs=radio, outputs=[audio_input, youtube_input])

+     msg.submit(respond, [msg, chatbot], [msg, chatbot])
+
+     def transcript(audio_input, youtube_input, radio):
+         if radio == "Audio":
+             txt = model(audio_input)["text"]
+         else:
+             txt = youtube_to_text(youtube_input)
+         history_gpt.append(
+             {
+                 "role": "system",
+                 "content": "Here is a text in Russian that was transcribed from an audio or a video. The user will ask questions about this text, such as translating it into another language, summarizing it, or extracting relevant information. By default, respond in English unless the user says otherwise. Here is the text: "
+                 + txt,
+             }
+         )
+         print(txt)
+         return txt
+
    btn.click(
-         fn=lambda audio_input, youtube_input, radio: model(audio_input)["text"]
-         if radio == "Audio"
-         else youtube_to_text(youtube_input),
+         fn=transcript,
        inputs=[audio_input, youtube_input, radio],
        outputs=output,
    )

+
demo.launch()
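For reference, a sketch (not from the commit) of what the two conversation stores hold after one transcription and one follow-up question: history_gpt is the OpenAI-style message list that transcript() and respond() append to, while the Gradio Chatbot keeps plain (user, assistant) tuples, so the system message carrying the transcript never shows up in the visible chat.

# Illustrative values only; the real strings come from the user and the model.
history_gpt = [
    {"role": "system", "content": "Here is a text in Russian that was transcribed ... Here is the text: <transcript>"},
    {"role": "user", "content": "Summarize the text in two sentences."},
    {"role": "assistant", "content": "<gpt-3.5-turbo reply>"},
]
chat_history = [
    ("Summarize the text in two sentences.", "<gpt-3.5-turbo reply>"),
]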