individual then add offset method
App/Generate/database/Model.py  +25 −13
@@ -23,18 +23,18 @@ class WordAlignment(BaseModel):
     hasFailedAlignment: bool
 
     @classmethod
-    def from_old_format(cls, data: dict):
+    def from_old_format(cls, data: dict, offset: float = 0.0):
         return cls(
             text=data["word"],
             alignedWord=data["alignedWord"],
-            start=data["startTime"],
-            end=data["endTime"],
+            start=data["startTime"] + offset,
+            end=data["endTime"] + offset,
             hasFailedAlignment=data["hasFailedAlignment"],
         )
 
 
-def transform_alignment_data(data: List[dict]) -> List[dict]:
-    return [WordAlignment.from_old_format(item).model_dump() for item in data]
+def transform_alignment_data(data: List[dict], offset: float = 0.0) -> List[dict]:
+    return [WordAlignment.from_old_format(item, offset).model_dump() for item in data]
 
 
 class Project(orm.Model):
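With the new offset parameter, the aligner's scene-relative startTime/endTime are rebased onto the project timeline at conversion time. A minimal sketch of the expected behaviour, using the functions above (the sample alignment dict is made up for illustration):

    # One aligned word in the old (scene-relative) format -- sample data only.
    sample = [
        {
            "word": "hello",
            "alignedWord": "hello",
            "startTime": 0.5,
            "endTime": 1.25,
            "hasFailedAlignment": False,
        }
    ]

    # If this scene starts 12 s into the project, both timestamps shift by 12.
    shifted = transform_alignment_data(sample, offset=12.0)
    assert shifted[0]["start"] == 12.5 and shifted[0]["end"] == 13.25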
@@ -62,6 +62,7 @@ class Project(orm.Model):
         image_assets = []
         video_assets = []
         audio_assets = []
+        text_stream = []
 
         transitions = [
             # "WaveRight_transparent.webm",
@@ -104,6 +105,10 @@ class Project(orm.Model):
                 }
             )
 
+            # generate transcripts
+            temp = await scene.generate_scene_transcript(offset=self.start)
+            text_stream.extend(temp)
+
             ## images and transitions
             for image in scene.images:
                 file_name = str(uuid.uuid4()) + ".png"
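Note: the offset passed here is self.start, presumably the running timeline cursor marking where the current scene begins (it is maintained elsewhere in this loop and not visible in the diff), so each scene's words land at their absolute position in the project.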
@@ -144,14 +149,15 @@ class Project(orm.Model):
                 "height": 1920,
                 "width": 1080,
             }
-        try:
-            text_stream = await self.generate_transcript()
-            print(text_stream)
-            self.assets.append({"type": "text", "sequence": text_stream})
+        self.assets.append({"type": "text", "sequence": text_stream})
+        # try:
+        #     text_stream = await self.generate_transcript()
+        #     print(text_stream)
+        #     self.assets.append({"type": "text", "sequence": text_stream})
 
-        except Exception as e:
-            print(f"Text sequence failed, {e}")
-            pass
+        # except Exception as e:
+        #     print(f"Text sequence failed, {e}")
+        #     pass
 
         await self.update(**self.__dict__)
         return {"links": self.links, "assets": self.assets, "constants": self.constants}
@@ -167,7 +173,7 @@ class Project(orm.Model):
             links.append(narration.narration_link)
 
         transcript = await narration.tts._make_transcript(links=links, text=text)
-        transcript = transform_alignment_data(transcript)
+        # transcript = transform_alignment_data(transcript)
         return transcript
 
 
@@ -191,6 +197,12 @@ class Scene(orm.Model):
         "narration_link": orm.String(max_length=10_000, allow_null=True, default=""),
     }
 
+    async def generate_scene_transcript(self, offset):
+        links = [self.narration_link]
+        text = self.narration
+        transcript = await self.tts._make_transcript(links=links, text=text)
+        return transform_alignment_data(data=transcript, offset=offset)
+
     async def generate_scene_data(self):
         # Run narrate() and generate_images() concurrently
         await asyncio.gather(self.narrate(), self.generate_images())
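Taken together, the change replaces the single whole-project transcript pass (now kept only as commented-out reference) with per-scene transcripts whose timings are offset individually, which is what the commit title describes. A rough sketch of the resulting flow, assuming self.start is advanced per scene; the loop shown here is illustrative, not part of the diff:

    text_stream = []
    for scene in scenes:  # hypothetical iteration over the project's scenes
        # Each scene transcribes its own narration, then its word timings
        # are rebased by the scene's start time on the project timeline.
        words = await scene.generate_scene_transcript(offset=self.start)
        text_stream.extend(words)
    self.assets.append({"type": "text", "sequence": text_stream})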