wissemkarous committed
Commit • 4d4096e
1 Parent(s): a25c576
init
app.py CHANGED
@@ -60,9 +60,86 @@
 # st.info("Author ©️ : wissem karous ")
 # st.info("Made with ❤️ ")
 
+///////////////////
+# import streamlit as st
+# import os
+# from utils.demo import load_video, ctc_decode
+# from utils.two_stream_infer import load_model
+# from scripts.extract_lip_coordinates import generate_lip_coordinates
+# import options as opt
+
+# st.set_page_config(layout="wide")
+
+# model = load_model()
+
+# st.title("Lipreading final year project Demo")
+
+# st.info(
+#     "The inference speed is very slow on Huggingface spaces due to it being processed entirely on CPU ",
+#     icon="ℹ️",
+# )
+
+# # Generating a list of options or videos
+# options = sorted(os.listdir(os.path.join("app_input")))  # Ensure the list is sorted
+# selected_video = st.selectbox("Choose video", options)
+
+# # Find the index of the selected video and calculate the index of the next video
+# selected_index = options.index(selected_video)
+# next_video_index = (selected_index + 1) % len(options)  # Ensures looping back to start
+# next_video = options[next_video_index]
+
+# col1, col2 = st.columns(2)
+
+# # Function to display video in a column
+# def display_video(column, video_path, video_name):
+#     os.system(f"ffmpeg -i {video_path} -vcodec libx264 {video_name}.mp4 -y")
+#     video = open(f"{video_name}.mp4", "rb")
+#     video_bytes = video.read()
+#     column.video(video_bytes)
+
+# # Displaying the selected video in the first column
+# with col1:
+#     file_path = os.path.join("app_input", selected_video)
+#     video_name = selected_video.split(".")[0]
+#     display_video(col1, file_path, video_name)
+# # Displaying the next video in the second column
+# with col2:
+#     st.info("Expected Result !")
+#     next_file_path = os.path.join("app_input", next_video)
+#     next_video_name = next_video.split(".")[0]
+#     display_video(col2, next_file_path, next_video_name)
+
+
+# # Assuming further processing (like generating predictions) is only intended for the first (selected) video
+# with col1, st.spinner("Processing video..."):
+#     video, img_p, files = load_video(f"{video_name}.mp4", opt.device)
+#     coordinates = generate_lip_coordinates(f"{video_name}_samples")
+#     # Assuming 'frames_generated' and 'coordinates_generated' are used for control flow or further processing
+#     frames_generated = True
+#     coordinates_generated = True
+#     if frames_generated and coordinates_generated:
+#         st.markdown(f"Frames Generated for {video_name}:\n{files}")
+#         st.markdown(f"Coordinates Generated for {video_name}:\n{coordinates}")
+
+# with col2:
+#     st.info("Ready to make prediction!")
+#     generate = st.button("Generate")
+#     if generate:
+#         with st.spinner("Generating..."):
+#             y = model(
+#                 video[None, ...].to(opt.device),
+#                 coordinates[None, ...].to(opt.device),
+#             )
+#             txt = ctc_decode(y[0])
+#             st.text(txt[-1])
+
+# st.info("Author ©️ : Wissem Karous ")
+# st.info("Made with ❤️")
+/////////////////////
 
 import streamlit as st
 import os
+import cv2
 from utils.demo import load_video, ctc_decode
 from utils.two_stream_infer import load_model
 from scripts.extract_lip_coordinates import generate_lip_coordinates
@@ -72,7 +149,7 @@ st.set_page_config(layout="wide")
 
 model = load_model()
 
-st.title("Lipreading final year project Demo")
+st.title("Lipreading Final Year Project Demo")
 
 st.info(
     "The inference speed is very slow on Huggingface spaces due to it being processed entirely on CPU ",
@@ -90,18 +167,27 @@ next_video = options[next_video_index]
 
 col1, col2 = st.columns(2)
 
-# Function to display video in a column
+# Function to display video in a column with resizing
 def display_video(column, video_path, video_name):
-    os.system(f"ffmpeg -i {video_path} -vcodec libx264 {video_name}.mp4 -y")
-    video = open(f"{video_name}.mp4", "rb")
-    video_bytes = video.read()
-    column.video(video_bytes)
+    cap = cv2.VideoCapture(video_path)
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    new_width = 320  # Adjust this value for desired width
+    new_height = int((new_width / width) * height)
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        frame = cv2.resize(frame, (new_width, new_height))
+        column.image(frame, channels="BGR")
+    cap.release()
 
 # Displaying the selected video in the first column
 with col1:
     file_path = os.path.join("app_input", selected_video)
     video_name = selected_video.split(".")[0]
     display_video(col1, file_path, video_name)
+
 # Displaying the next video in the second column
 with col2:
     st.info("Expected Result !")
@@ -109,7 +195,6 @@ with col2:
     next_video_name = next_video.split(".")[0]
     display_video(col2, next_file_path, next_video_name)
 
-
 # Assuming further processing (like generating predictions) is only intended for the first (selected) video
 with col1, st.spinner("Processing video..."):
     video, img_p, files = load_video(f"{video_name}.mp4", opt.device)
@@ -135,3 +220,4 @@ with col2:
 
 st.info("Author ©️ : Wissem Karous ")
 st.info("Made with ❤️")
+
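Note on the change above: the old display_video re-encoded each clip with ffmpeg and streamed the bytes through st.video, while the committed version decodes the clip with OpenCV, scales every frame to a 320-pixel width while preserving the aspect ratio, and draws the BGR frames into the column with column.image. The sketch below isolates that resizing loop so it can be tried on its own; the show_resized name, the app_input/sample.mp4 path, the isOpened guard, and the single reused st.empty() placeholder (the commit calls column.image directly, which appends a new image per frame) are illustrative assumptions, not part of the commit.

import cv2
import streamlit as st


def show_resized(container, video_path, new_width=320):
    # Decode the clip frame by frame and render it at a fixed display width.
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        container.warning(f"Could not open {video_path}")
        return
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    new_height = int((new_width / width) * height)  # keep the aspect ratio
    placeholder = container.empty()  # reuse one slot instead of stacking one image per frame
    while True:
        ret, frame = cap.read()
        if not ret:  # end of stream
            break
        frame = cv2.resize(frame, (new_width, new_height))
        placeholder.image(frame, channels="BGR")  # OpenCV frames are BGR, not RGB
    cap.release()


col1, col2 = st.columns(2)
with col1:
    # "app_input/sample.mp4" is a placeholder path for illustration only.
    show_resized(col1, "app_input/sample.mp4")

Rendering frames with st.image removes the need for the browser-playable H.264 re-encode, which is presumably why the ffmpeg call could be dropped; the trade-off is that the clip is shown as a silent frame sequence rather than a seekable video player.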