NoorIlyas committed on
Commit
68da411
·
verified ·
1 Parent(s): 635832a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -31
app.py CHANGED
@@ -1,37 +1,18 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
- from PIL import Image
4
- import requests
5
- from io import BytesIO
6
- python -m pip install
7
 
 
 
8
 
9
- # Load the emotion detection model from Hugging Face's Transformers library
10
- emotion_model = pipeline("image-classification", model="EleutherAI/emotion-english", device=0)
 
 
 
 
11
 
12
- # Define the function to make predictions on user-uploaded images
13
- def predict_emotion(image):
14
- # Open the uploaded image
15
- img = Image.open(BytesIO(image.encode("utf-8")))
16
 
17
- # Make prediction using the emotion detection model
18
- result = emotion_model(img)
19
-
20
- # Extract the emotion label with the highest confidence
21
- emotion = result[0]['label']
22
-
23
- return f"Predicted emotion: {emotion}"
24
-
25
- # Define the Gradio interface
26
- iface = gr.Interface(
27
- fn=predict_emotion,
28
- inputs=gr.Image(),
29
- outputs="text",
30
- live=True,
31
- interpretation="default",
32
- title="Face Expression Predictor",
33
- description="Upload an image and get the predicted face expression."
34
- )
35
-
36
- # Launch the Gradio app
37
  iface.launch()
 
1
  import gradio as gr
2
+ from easytts import EasyTTS
 
 
 
 
3
 
4
+ # Initialize the EasyTTS model
5
+ tts_model = EasyTTS()
6
 
7
+ def text_to_speech(text_input):
8
+ # Use EasyTTS to convert text to speech
9
+ audio_data = tts_model.synthesize(text_input)
10
+
11
+ # Display and play the audio
12
+ gr.output.Audio(audio_data).play()
13
 
14
+ # Create a Gradio interface
15
+ iface = gr.Interface(fn=text_to_speech, inputs="text", outputs=None)
 
 
16
 
17
+ # Launch the interface
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  iface.launch()