diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..d72c4349c7c6c3a6676989b6adb43619c654d95c 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ffmpeg filter=lfs diff=lfs merge=lfs -text
+temporary_uploads/** filter=lfs diff=lfs merge=lfs -text
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..b5b3e0cc44d6bfefbbdbf8b408391c39104883bd
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,45 @@
+FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04
+LABEL maintainer="Hugging Face"
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+RUN useradd -m -u 1000 user
+
+# Switch to the "user" user
+USER user
+
+# Set home to the user's home directory
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+WORKDIR /code
+
+COPY --chown=user ./requirements.txt /code/requirements.txt
+
+USER root
+
+RUN apt update
+RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg
+RUN apt-get update && apt-get install ffmpeg libsm6 libxext6 -y
+
+ARG PYTORCH='2.0.1'
+ARG TORCH_VISION=''
+ARG TORCH_AUDIO=''
+# Example: `cu102`, `cu113`, etc.
+ARG CUDA='cu118'
+
+RUN python3 -m pip install --no-cache-dir --upgrade pip
+RUN [ ${#PYTORCH} -gt 0 ] && VERSION='torch=='$PYTORCH'.*' || VERSION='torch'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA
+RUN [ ${#TORCH_VISION} -gt 0 ] && VERSION='torchvision=='$TORCH_VISION'.*' || VERSION='torchvision'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA
+RUN [ ${#TORCH_AUDIO} -gt 0 ] && VERSION='torchaudio=='$TORCH_AUDIO'.*' || VERSION='torchaudio'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA
+
+RUN python3 -m pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ENV OPENAI_API_KEY=""
+ENV GOOGLE_APPLICATION_CREDENTIALS=""
+
+USER user
+
+COPY --chown=user . .
+
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
diff --git a/README.md b/README.md
index 890827d6f1a3179bfc5f05cecad11cf4560397b6..0af62f41f9e443091454e91746359d960dd627d2 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,96 @@
 ---
-title: ShortScribe Pipeline
-emoji: 👀
-colorFrom: yellow
-colorTo: yellow
+title: Short Video Descriptions
+emoji: 🌖
+colorFrom: red
+colorTo: red
 sdk: docker
 pinned: false
 ---
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# ShortScribe Pipeline
+
+This repository provides code for the paper [Making Short-Form Videos Accessible With Hierarchical Video Summaries](https://arxiv.org/abs/2402.10382). It introduces the pipeline for ShortScribe, a tool that makes short-form videos accessible to blind and low-vision users by generating summaries at varying levels of depth. This source code specifically provides an API for generating the summaries. The source code for ShortScribe's interface is available in [this GitHub repository](https://github.com/tessvandaele/tiktok-simulation).
+
+# Installing ShortScribe
+
+Before building the environment to run the ShortScribe pipeline, ensure that you have enough resources and credentials. You will need Google Cloud service account credentials in the form of a JSON file and an OpenAI API key. We deployed our system on an NVIDIA A100 GPU with about 50 GB of memory using PyTorch 2.0.1+cu118.
+
+To get a local copy of the pipeline, run ```git lfs install``` and then ```git clone git@hf.co:spaces/akhil03/ShortScribe-Pipeline```
+
+# Running ShortScribe
+
+The Dockerfile builds the environment with the necessary packages and then starts a development server that accepts API requests. To set up the Docker container, perform the following steps, filling in the angle-bracket placeholders with your own image and container names:
+
+```
+docker build -t <image_name> .
+docker run -p 7860:7860 <image_name>
+docker exec -it <container_id> /bin/bash
+```
+
+If you push this repository to your own Hugging Face Space, the Docker container is built and started automatically, so you can skip the commands above.
+
+# API Calls
+
+The API calls expect the video to be summarized to already be uploaded to the `temporary_uploads/` folder before any request is sent. Make sure the video is named `<video_id>.mp4` (e.g. `01.mp4`) and uploaded to `temporary_uploads/` before making any API calls.
+
+### getVideoData/
+
+Given the video ID (specified in the file name), returns all of the data extracted from the video before it is summarized by GPT-4. Returns a list of JSON objects, one per shot, each formatted as follows:
+
+```json
+{
+    "start": 0.0, // The start of the shot in seconds (float)
+    "end": 5.75, // The end of the shot in seconds (float)
+    "text_on_screen": "Sample text", // On-screen text in the shot (string)
+    "transcript_text": "Sample text", // Audio transcript of the shot (string)
+    "image_captions": ["Sample text", "Sample text", "Sample text", "Sample text", "Sample text"], // Up to five candidate image captions generated by BLIP-2, ordered by CLIP similarity score (highest first)
+    "image_captions_clip": [
+        {
+            "text": "Sample text", // Image caption generated by BLIP-2
+            "score": 1.0 // Image caption similarity score generated by CLIP
+        },
+        ... 4 more ...
+    ]
+}
+```
+
+### getShotSummaries/
+
+Given the video ID (specified by the file name), returns a list of JSON objects, one for each shot of the video. The format for each JSON object in the list is shown below:
+
+```json
+{
+    "start": 0.0, // The start of the shot in seconds (float)
+    "end": 5.75, // The end of the shot in seconds (float)
+    "text_on_screen": "Sample text", // On-screen text in the shot (string)
+    "per_shot_summaries": "Summary of the shot generated by GPT-4" // Summary of the shot (string)
+}
+```
+
+### getVideoSummary/
+
+Given the video ID (specified in the file name), returns the overall summaries of the video (a full description plus 10-word, 25-word, and 50-word summaries) generated by GPT-4.
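+
+For example, with the container running and `temporary_uploads/01.mp4` already in place, the summaries can be requested over plain HTTP. The snippet below is a minimal sketch using the `requests` library; the host and port are assumptions based on the Dockerfile's default `uvicorn` settings, so adjust them to match your deployment:
+
+```python
+import requests
+
+BASE_URL = "http://localhost:7860"  # assumed local deployment; change to your Space URL if needed
+
+# Ask the pipeline to summarize temporary_uploads/01.mp4.
+# The request runs the full pipeline (shot detection, OCR, transcription,
+# captioning, GPT-4), so allow a generous timeout.
+response = requests.get(f"{BASE_URL}/getVideoSummary/01", timeout=600)
+response.raise_for_status()
+
+summaries = response.json()
+print(summaries["summary_25"])
+```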
Returns a JSON object shown as follows: + +```json +{ + "video_description": "Sample text", // Summary of the extracted data of the entire video without a word limit + "summary_10": "Sample text", // Summary of the extracted data of the entire video in 10 words + "summary_25": "Sample text", // Summary of the extracted data of the entire video in 25 words + "summary_50": "Sample text" // Summary of the extracted data of the entire video in 50 words +} +``` + +# Credits and Citation + +If you have any questions or issues related to the source code, feel free to reach out to Akhil Iyer (akhil.iyer@utexas.edu) + +If our work is useful to you, please cite our work with the following citation: + +``` +@article{van2024making, + title={Making Short-Form Videos Accessible with Hierarchical Video Summaries}, + author={Van Daele, Tess and Iyer, Akhil and Zhang, Yuning and Derry, Jalyn C and Huh, Mina and Pavel, Amy}, + journal={arXiv preprint arXiv:2402.10382}, + year={2024} + } +``` diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..45be4edcfefffb0a9564474feba1c96801f401f9 --- /dev/null +++ b/app.py @@ -0,0 +1,682 @@ +import os +import gradio as gr +from google.cloud import videointelligence, speech, storage +import io +import json +import cv2 +import torch +import clip +from PIL import Image +from transformers import Blip2Processor, Blip2ForConditionalGeneration +import openai +import wave +from fastapi import FastAPI, File, UploadFile +from fastapi.responses import JSONResponse +import uvicorn +from pydantic import BaseModel + +clip_loaded, blip_loaded = False, False +cred_file = "" +os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = cred_file +os.environ["OPENAI_API_KEY"] = "" +openai_api_key = "" + + +def get_timestamps(video): + + tiktok_vid = video + + ffmpeg_command = """ffmpeg -i tiktokvideo -filter:v "select='gt(scene,0.2)',showinfo" -f null - 2> ffout""" + ffmpeg_command = ffmpeg_command.replace("tiktokvideo", tiktok_vid) + + grep_command = """grep showinfo ffout | grep 'pts_time:[0-9.]*' -o | grep '[0-9]*\.[0-9]*' -o > timestamps.txt""" + + os.system(ffmpeg_command) + os.system(grep_command) + + with open('timestamps.txt', "r") as t: + times = [0] + [float(k) for k in t.read().split("\n") if k] + + times_output = "Times: " + print(times) + for time in times: + times_output += str(time) + ", " + + return times_output + +def get_text_annotations(video, cred_file): + os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = cred_file + + # get text annotation results + # OCR + video_client = videointelligence.VideoIntelligenceServiceClient() + features = [videointelligence.Feature.TEXT_DETECTION] + video_context = videointelligence.VideoContext() + + with io.open(video, "rb") as file: + input_content = file.read() + + operation = video_client.annotate_video( + request={ + "features": features, + "input_content": input_content, + "video_context": video_context, + } + ) + + print("\nProcessing video for text detection.") + result = operation.result(timeout=300) + + # The first result is retrieved because a single video was processed. 
+ annotation_result = result.annotation_results[0] + + # format text annotation results + # for each video-detected segment, get confidence + text_annotation_json = [] + + for text_annotation in annotation_result.text_annotations: + + text_segment = text_annotation.segments[0] + start_time = text_segment.segment.start_time_offset + end_time = text_segment.segment.end_time_offset + + frame = text_segment.frames[0] + time_offset = frame.time_offset + + current_text_annotation_json = { + "text": text_annotation.text, + "start": start_time.seconds + start_time.microseconds * 1e-6, + "end": end_time.seconds + end_time.microseconds * 1e-6, + "confidence": text_segment.confidence, + "vertecies": [] + } + + for vertex in frame.rotated_bounding_box.vertices: + current_text_annotation_json["vertecies"].append([vertex.x, vertex.y]) + text_annotation_json.append(current_text_annotation_json) + + out = [] + + for text_annotation in annotation_result.text_annotations: + + text_segment = text_annotation.segments[0] + start_time = text_segment.segment.start_time_offset + end_time = text_segment.segment.end_time_offset + + start_time_s = start_time.seconds + start_time.microseconds * 1e-6 + end_time_s = end_time.seconds + end_time.microseconds * 1e-6 + confidence = text_segment.confidence + + frame = text_segment.frames[0] + top_left = frame.rotated_bounding_box.vertices[0] + + out.append([start_time_s, end_time_s, text_annotation.text, confidence, top_left.y]) + + simple_text = [k for k in sorted(out, key= lambda k: k[0] + k[4]) if k[3] > 0.95] + + for s in simple_text: + print(s) + + with open('annotation.json', 'w') as f: + json.dump(text_annotation_json, f, indent=4) + + with open('simple_annotation.json', 'w') as f: + json.dump(simple_text, f, indent=4) + +def transcribe_video(video, cred_file): + + os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = cred_file + + if os.path.exists("output_audio.wav"): + os.remove("output_audio.wav") + else: + print("NOT THERE") + + wav_cmd = f"ffmpeg -i {video} output_audio.wav" + os.system(wav_cmd) + + print(os.path.exists("output_audio.wav")) + + gcs_uri = upload_file_to_bucket("output_audio.wav", cred_file) + + speech_client = speech.SpeechClient() + + with open("output_audio.wav", "rb") as f: + audio_content = f.read() + + + audio = speech.RecognitionAudio(uri=gcs_uri) + sample_rate_hertz, audio_channel_count = wav_data("output_audio.wav") + + config = speech.RecognitionConfig( + encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, + sample_rate_hertz=sample_rate_hertz, + audio_channel_count=audio_channel_count, + language_code="en-US", + model="video", + enable_word_time_offsets=True, + enable_automatic_punctuation=True, + enable_word_confidence=True + ) + + request = speech.LongRunningRecognizeRequest( + config=config, + audio=audio + ) + + operation = speech_client.long_running_recognize(request=request) + + print("Waiting for operation to complete...") + + response = operation.result(timeout=600) + + out = [] + for i, result in enumerate(response.results): + alternative = result.alternatives[0] + + if len(alternative.words) > 0: + alt_start = alternative.words[0].start_time.seconds + alternative.words[0].start_time.microseconds * 1e-6 + alt_end = alternative.words[-1].end_time.seconds + alternative.words[-1].end_time.microseconds * 1e-6 + + for word in alternative.words: + out.append([word.word, + word.start_time.seconds + word.start_time.microseconds * 1e-6, + word.end_time.seconds + word.end_time.microseconds * 1e-6, + word.confidence]) + + simple_text = [k 
for k in sorted(out, key= lambda k: k[1])] + for s in simple_text: + print(s) + + with open("speech_transcriptions.json", "w") as f: + json.dump(simple_text, f, indent=4) + + return simple_text + +def wav_data(wav_file): + + with wave.open(wav_file, 'rb') as wf: + sample_rate_hertz = wf.getframerate() + audio_channel_count = wf.getnchannels() + + return sample_rate_hertz, audio_channel_count + +def get_shot_frames(video, shot_text): + cam = cv2.VideoCapture(video) + fps = cam.get(cv2.CAP_PROP_FPS) + frame_count = int(cam.get(cv2.CAP_PROP_FRAME_COUNT)) + duration = frame_count/fps + + with open('timestamps.txt', 'r') as t: + times = [0] + [float(k) for k in t.read().split('\n') if k] + print("Times: ", times) + + with open('simple_annotation.json', 'r') as f: + simple_text = json.load(f) + + with open('speech_transcriptions.json', 'r') as f: + transcriptions = json.load(f) + + for i, time in enumerate(times): + current_time = time + next_time = times[i + 1] if i < len(times) - 1 else duration + + rel_text = [s for s in simple_text if s[0] >= current_time and s[0] < next_time] + plain_rel_text = ' '.join([s[2] for s in rel_text]) + + rel_transcriptions = [t for t in transcriptions if t[1] >= current_time and t[1] < next_time] + plain_transcriptions = ' '.join([t[0] for t in rel_transcriptions]) + + shot_text.append({ + "start": current_time, + "end": next_time, + "text_on_screen": plain_rel_text, + "transcript_text": plain_transcriptions + }) + + frames = [] + for i, shot in enumerate(shot_text): + keyframe_time = (shot["end"] - shot["start"])/2 + shot["start"] + cam.set(1, int(fps * (keyframe_time))) + ret, frame = cam.read() + + if ret: + cv2.imwrite('shot' + str(i) + '.png', frame) + frame_copy = Image.fromarray(frame).convert('RGB') + frames.append(frame_copy) + + return frames + + +def load_clip_model(): + device = 'cuda' if torch.cuda.is_available() else 'cpu' + clip_model, preprocess = clip.load('ViT-B/32', device=device) + + return clip_model, preprocess, device + +def clip_score(fn, text_list, clip_model, preprocess, clip_device): + fn.show() + image = preprocess(fn).unsqueeze(0).to(clip_device) + text = clip.tokenize(text_list).to(clip_device) + + with torch.no_grad(): + image_features = clip_model.encode_image(image) + text_features = clip_model.encode_text(text) + + logits_per_image, logits_per_text = clip_model(image, text) + probs = logits_per_image.softmax(dim=-1).cpu().numpy() + + return probs + + +def load_blip_model(): + device = "cuda:0" if torch.cuda.is_available() else "cpu" + + processor = Blip2Processor.from_pretrained('Salesforce/blip2-flan-t5-xxl') + model = Blip2ForConditionalGeneration.from_pretrained( + 'Salesforce/blip2-flan-t5-xxl', torch_dtype=torch.float16 + ) + + model = model.to(device) + + return model, processor, device + +def run_blip(shot_text, frames, model, processor, device, clip_model, preprocess, clip_device): + # get a caption for each image + + for i, shot in enumerate(shot_text): + if not os.path.exists(f"shot{i}.png"): + shot_text[i]["image_captions"] = ["" for _ in range(5)] + shot_text[i]["image_captions_clip"] = [{"text": "", "score": 0.0} for _ in range(5)] + continue + + image = Image.open(f"shot{i}.png").convert('RGB') + + with torch.no_grad(): + # nucleus sampling + gen_texts = [] + for j in range(5): + inputs = processor(images=image, return_tensors="pt").to(device, torch.float16) + generated_ids = model.generate(**inputs, min_length=5, max_length=20, do_sample=True, top_p=0.9) + generated_text = processor.batch_decode(generated_ids, 
skip_special_tokens=True)[0].strip() + gen_texts.append(generated_text) + + image.show() + shot_text[i]["image_captions"] = [gen_texts[j] for j in range(len(gen_texts))] + print(shot_text[i]["image_captions"]) + + clip_scores = clip_score(image.copy(), shot_text[i]["image_captions"], clip_model, preprocess, clip_device)[0] + print(clip_scores) + shot_text[i]["image_captions_clip"] = [{"text": shot_text[i]["image_captions"][j], + "score": float(clip_scores[j])} for j in range(len(shot_text[i]["image_captions"]))] + + shot_text[i]["image_captions_clip"] = sorted(shot_text[i]["image_captions_clip"], key=lambda x: x["score"] * -1) + + for s in shot_text[i]["image_captions_clip"]: + print(s) + + shot_text[i]["image_captions"] = [t["text"] for t in shot_text[i]["image_captions_clip"] if "caption" not in t["text"]] + + for i, shot in enumerate(shot_text): + if os.path.exists(f"shot{i}.png"): + os.remove(f"shot{i}.png") + + return shot_text + +def get_summaries(summary_input, openai_key): + gpt_system_prompt = f'''Your task is to generate a summary paragraph for an entire short-form video based on data extracted from the video. Your summary must be a holistic description of the full video. \n + + The text in quotations defines the format of the data that I will provide you. The video data comprises of data extracted from all shots of the video.\n + The data is formatted in the structure defined in the quotations:\n + "\n + SHOT NUMBER + Duration: the number of seconds that the shot lasts + Text on screen: Any text that appears in the shot + Shot audio transcript: Any speech that is in the shot + Shot description: A short visual description of what is happening in the shot + "\n + ''' + + gpt_user_prompt = f'''Perform this video summarization task for the video below, where the data is delimited by triple quotations.\n + Video: \n"""{summary_input}"""\n ''' + + messages = [{"role": "system", "content": gpt_system_prompt}, + {"role": "user", "content": gpt_user_prompt}] + responses = [] + + response = openai.ChatCompletion.create( + model='gpt-4', + messages=messages + ) + + messages.append(response.choices[0].message) + responses.append(response.choices[0].message["content"]) + + for word_limit in [50, 25, 10]: + + condense_prompt = f'''Condense the summary below such that the response adheres to a {word_limit} word limit.\n + Summary: """ {response.choices[0].message["content"]} """\n''' + + messages.append({"role": "user", "content": condense_prompt}) + + response = openai.ChatCompletion.create( + model='gpt-4', + messages=messages + ) + + messages.append(response.choices[0].message) + responses.append(response.choices[0].message["content"]) + + return responses + +def get_shot_summaries(summary_input, openai_key): + + gpt_system_prompt = f'''Your task is to generate a summary for each shot of a short-form video based on data extracted from the video.\n + + The text in quotations defines the format of the data that I will provide you. The video data comprises of data extracted from all shots of the video.\n + The data is formatted in the structure defined in the quotations:\n + "\n + SHOT NUMBER + Duration: the number of seconds that the shot lasts + Text on screen: Any text that appears in the shot + Shot audio transcript: Any speech that is in the shot + Shot description: A short visual description of what is happening in the shot + "\n + + All of the summaries you create must satisfy the following constraints:\n + + 1. 
If the field for text on screen is empty, do not include references to text on screen in the summary.\n + 2. If the field for shot audio transcript is empty, do not include references to shot audio transcript in the summary.\n + 3. If the field for shot description is empty, do not include references to the shot description in the summary.\n + 4. If the field for shot description is empty, do not include references to shot description in the summary.\n + 5. Do not include references to Tiktok logos or Tiktok usernames in the summary.\n + + There must be a summary for every shot in the data. + + Provide the summaries in a newline-separated format. There must be exactly one summary for every shot.\n + You must strictly follow the format inside the quotations.\n + + "Your first summary\n + Your second summary\n + Your third summary\n + More of your summaries...\n + Your last summary\n + " + + ''' + + gpt_user_prompt = f'''Perform this summarization task for the video below, where the data is delimited by triple quotations.\n + Video: \n"""{summary_input}"""\n ''' + + + messages = [{"role": "system", "content": gpt_system_prompt}, + {"role": "user", "content": gpt_user_prompt}] + responses = [] + + response = openai.ChatCompletion.create( + model='gpt-4', + messages=messages + ) + + messages.append(response.choices[0].message) + responses.append(response.choices[0].message["content"]) + + responses[0] = responses[0].strip() + shot_summary_list = [shot_summ.strip().strip('[]') for shot_summ in responses[0].split("\n") + if shot_summ.strip().strip('[]') != "" and shot_summ.strip().strip('[]') != " "] + + print(responses[0]) + print() + print(shot_summary_list) + print() + + return shot_summary_list + +def upload_file_to_bucket(filename, cred_file): + storage_client = storage.Client.from_service_account_json( + cred_file, + project="short-video-descriptions") + + bucket_name = "short-video-descriptions" + destination_blob_name = filename + bucket = storage_client.get_bucket(bucket_name) + blob = bucket.blob(destination_blob_name) + + blob.upload_from_filename(filename) + + return f"gs://{bucket_name}/{destination_blob_name}" + + +def blob_exists(filename, cred_file): + storage_client = storage.Client.from_service_account_json( + cred_file, + project="short-video-descriptions") + + bucket_name = 'short-video-descriptions' + bucket = storage_client.bucket(bucket_name) + stats = storage.Blob(bucket=bucket, name=filename).exists(storage_client) + + return stats + +def del_blob(blob_name, cred_file): + storage_client = storage.Client.from_service_account_json( + cred_file, + project="short-video-descriptions") + + bucket = storage_client.bucket("short-video-descriptions") + blob = bucket.blob(blob_name) + generation_match_precondition = None + + # Optional: set a generation-match precondition to avoid potential race conditions + # and data corruptions. The request to delete is aborted if the object's + # generation number does not match your precondition. + blob.reload() # Fetch blob metadata to use in generation_match_precondition. 
+ generation_match_precondition = blob.generation + + blob.delete(if_generation_match=generation_match_precondition) + + print(f"Blob {blob_name} deleted.") + +def get_summary_input(shot_text): + summ_input = "" + for i, s in enumerate(shot_text): + summ_input += f"SHOT {i + 1}\n" + summ_input += f"Duration: {round(s['end'] - s['start'])} seconds\n" + summ_input += f"Text on screen: {s['text_on_screen']}\n" + summ_input += f"Shot audio transcript: {s['transcript_text']}\n" + summ_input += f"Shot description: {s['image_captions'][0] if len(s['image_captions']) > 0 else ''}\n" + summ_input += "\n" + + return summ_input + +def get_video_data(video, transcript, cred_file): + shot_text = [] + timestamps_output = get_timestamps(video) + get_text_annotations(video, cred_file.name) + transcribe_video(video, cred_file.name) + frames = get_shot_frames(video, shot_text) + shot_text = run_blip(shot_text, frames, model, processor, device, clip_model, preprocess, clip_device) + + return shot_text + +def get_video_information(video, cred_file, openai_key): + shot_text = [] + timestamps_output = get_timestamps(video) + get_text_annotations(video, cred_file.name) + transcribe_video(video, cred_file.name) + frames = get_shot_frames(video, shot_text) + shot_text = run_blip(shot_text, frames, model, processor, device, + clip_model, preprocess, clip_device) + + print("FINAL INPUT") + print(shot_text) + + with open('cur_shots.json', 'w') as f: + json.dump(shot_text, f, indent=4) + + summary_input = get_summary_input(shot_text) + summaries = get_summaries(summary_input, openai_key) + + print("ALL SUMMARIES") + for summary in summaries: + print(summary) + + return (shot_text, summary_input) + (*summaries,) + +def get_per_shot_information(video, cred_file, openai_key): + shot_text = [] + timestamps_output = get_timestamps(video) + get_text_annotations(video, cred_file.name) + transcribe_video(video, cred_file.name) + frames = get_shot_frames(video, shot_text) + # vtt_content = transcribe_audio_google(video) + # get_audio_transcript("transcribed_captions.vtt", shot_text) + shot_text = run_blip(shot_text, frames, model, processor, device, + clip_model, preprocess, clip_device) + + print("FINAL INPUT") + print(shot_text) + + with open('cur_shots.json', 'w') as f: + json.dump(shot_text, f, indent=4) + + summary_input = get_summary_input(shot_text) + per_shot_summaries = get_shot_summaries(summary_input, openai_key) + per_shot_data = create_per_shot_dict(shot_text, per_shot_summaries) + + return (per_shot_data, per_shot_summaries, summary_input) + +def create_per_shot_dict(shot_text, per_shot_summaries): + + for elem in per_shot_summaries: + print(elem) + + per_shot_data = [] + for i, s in enumerate(shot_text): + cur_summ = "" + if i < len(per_shot_summaries): + cur_summ = per_shot_summaries[i] + per_shot_data.append({ + "start": s["start"], + "end": s["end"], + "text_on_screen": s["text_on_screen"], + "per_shot_summaries": cur_summ + }) + + return per_shot_data + +with gr.Blocks() as demo: + with gr.Row(): + video = gr.Video(label='Video To Describe', interactive=True) + + with gr.Column(): + api_cred_file = gr.File(label='Google API Credentials File', file_types=['.json']) + openai_key = gr.Textbox(label="OpenAI API Key") + + + with gr.Row(): + summary_btn = gr.Button("Summarize Full Video") + summary_per_shot_btn = gr.Button("Summarize Each Shot") + + with gr.Row(): + summary_input = gr.Textbox(label="Extracted Video Data") + + with gr.Row(): + summary = gr.Textbox(label='Summary') + with gr.Column(): + summary_10 = 
gr.Textbox(label='10-word Summary') + summary_25 = gr.Textbox(label='25-word Summary') + summary_50 = gr.Textbox(label='50-word Summary') + + with gr.Row(): + per_shot_summaries = gr.Textbox(label="Per Shot Summaries") + + with gr.Row(): + shot_data = gr.JSON(label='Shot Data') + + # inputs = [video, transcript, api_cred_file, openai_key] + inputs = [video, api_cred_file, openai_key] + outputs = [shot_data, summary_input, summary, summary_50, summary_25, summary_10] + + summary_btn.click(fn=get_video_information, inputs=inputs, outputs=outputs) + summary_per_shot_btn.click(fn=get_per_shot_information, inputs=inputs, outputs=[shot_data, per_shot_summaries, summary_input]) + + + +def analyze_video(video_id: str): + shot_text = [] + + video_path = f"temporary_uploads/{video_id}.mp4" + + timestamps_output = get_timestamps(video_path) + get_text_annotations(video_path, cred_file) + transcribe_video(video_path, cred_file) + frames = get_shot_frames(video_path, shot_text) + shot_text = run_blip(shot_text, frames, model, processor, device, clip_model, preprocess, clip_device) + + return shot_text + +def summarize_video(video_id: str): + + video_path = f"temporary_uploads/{video_id}.mp4" + shot_text = analyze_video(video_id) + summary_input = get_summary_input(shot_text) + summaries = get_summaries(summary_input, openai_api_key) + + summary_json = { + "video_description": summaries[0], + "summary_10": summaries[3], + "summary_25": summaries[2], + "summary_50": summaries[1] + } + + return summary_json + +def summarize_shots(video_id: str): + + video_path = f"temporary_uploads/{video_id}.mp4" + shot_text = analyze_video(video_id) + summary_input = get_summary_input(shot_text) + per_shot_summaries = get_shot_summaries(summary_input, "") + per_shot_data = create_per_shot_dict(shot_text, per_shot_summaries) + + return per_shot_data + +app = FastAPI() +app = gr.mount_gradio_app(app, demo, path="/gradio") + +@app.get("/") +async def read_main(): + return {"message": "Welcome to ShortVideoA11y! 
Go to https://utcs-hci-short-video-descriptions.hf.space/gradio for an interactive demo!"} + +@app.get("/getVideoData/{video_id}") +async def create_video_data(video_id: str): + try: + shot_text = analyze_video(video_id) + return JSONResponse(content=shot_text) + + except Exception as e: + error_content = {"error": str(e)} + return JSONResponse(content=error_content, status_code=400) + +@app.get("/getShotSummaries/{video_id}") +async def create_shot_summaries(video_id: str): + + per_shot_data = summarize_shots(video_id) + return JSONResponse(content=per_shot_data) + +@app.get("/getVideoSummary/{video_id}") +async def create_video_summaries(video_id: str): + + vid_summaries = summarize_video(video_id) + return JSONResponse(content=vid_summaries) + +demo.queue() + +if not clip_loaded: + clip_model, preprocess, clip_device = load_clip_model() + clip_loaded = True + +if not blip_loaded: + model, processor, device = load_blip_model() + blip_loaded = True diff --git a/ffmpeg b/ffmpeg new file mode 100755 index 0000000000000000000000000000000000000000..9b9d77c51f54c1d3e96c26eb09b7a763bc34f226 --- /dev/null +++ b/ffmpeg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfe20936c83ecf5d68e424b87e8cc45b24dd6be81787810123bb964a0df686f9 +size 78829164 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..22c4be42e34d08ef7748334402e4d040284e4d4b --- /dev/null +++ b/requirements.txt @@ -0,0 +1,19 @@ +numpy +opencv-python +transformers +accelerate +openai +google-cloud-videointelligence +google-cloud-speech +google-cloud-storage +ftfy +regex +tqdm +git+https://github.com/openai/CLIP.git +fastapi +pydantic +uvicorn +gradio + + + diff --git a/temporary_uploads/00.mp4 b/temporary_uploads/00.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..603df90bed4627fa0765dc19476d3ab41282d4de --- /dev/null +++ b/temporary_uploads/00.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5473e1871a2e3c7f53be879f770b3fca72cdc9af5d1080d3efdaeb7a47724c7e +size 1355860 diff --git a/temporary_uploads/01.mp4 b/temporary_uploads/01.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..e06868dc87ee2996bd54ee85738fd5a2e0d003fc --- /dev/null +++ b/temporary_uploads/01.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e783983af3fd6f0c6acd4f5ed352f50924f091406d364db8d08169f25ff0e1f +size 10015373 diff --git a/temporary_uploads/02.mp4 b/temporary_uploads/02.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..240ba4a79e85cb2abf58c5c635f34d61871d857f --- /dev/null +++ b/temporary_uploads/02.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86084623354235419974b7bf756054f49b4974a31bd50d92b889e08605cae23d +size 905228 diff --git a/temporary_uploads/03.mp4 b/temporary_uploads/03.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..15e836cf83ad8b3f4e50b6e903d7651300674575 --- /dev/null +++ b/temporary_uploads/03.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e43f2f7bf476d6eb2c796a4ba3e8b605bba01e716cbcc370715f17874bbe5597 +size 6804858 diff --git a/temporary_uploads/04.mp4 b/temporary_uploads/04.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..ee8236da04e37cc3962409d22689d6ef35dcc86f --- /dev/null +++ b/temporary_uploads/04.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:9dff0ba8a9abfe3c89007450d799e3b273657d115109988ad2a84985e93c3005 +size 2030868 diff --git a/temporary_uploads/05.mp4 b/temporary_uploads/05.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..69cd1141acc29cd7b2ebeab01a06dbe7d878000f --- /dev/null +++ b/temporary_uploads/05.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c78b5d4dc5b5ba772681770de8cb9d67e0a16eee8d41e7ed1629ffc2670a477 +size 8746174 diff --git a/temporary_uploads/06.mp4 b/temporary_uploads/06.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..549fb8a2a2317f64a8278ff73072c0f61dc8e752 --- /dev/null +++ b/temporary_uploads/06.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f17d7bd78ef1cacaa7bceb5e4254f48192c9a5ca6a953a1fd8a832900cc2b8c +size 9650819 diff --git a/temporary_uploads/07.mp4 b/temporary_uploads/07.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..273f5b993b256017e634b86340952bfaeda46580 --- /dev/null +++ b/temporary_uploads/07.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b6b80848b779070f8c97ef005c78cec1cb38d7b1d8dc9dd693ada1d635412aa +size 6965241 diff --git a/temporary_uploads/08.mp4 b/temporary_uploads/08.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..f6a69e7ba17b8e396b86617565716f42fc38e25a --- /dev/null +++ b/temporary_uploads/08.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d76535828210f382ff1a90c4c075b9a371551a46ce27a29987f2d277fa487a24 +size 3109225 diff --git a/temporary_uploads/09.mp4 b/temporary_uploads/09.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..206475fa407802fc816d24c523eb840676f46208 --- /dev/null +++ b/temporary_uploads/09.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:646d9ba9f24b8230f095ac769e23c4593665c8ab12753b0a921f8bafa53d879c +size 3346334 diff --git a/temporary_uploads/10.mp4 b/temporary_uploads/10.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..e7dcbf364909ff30d8c9e00ce9b8b9012dfb0416 --- /dev/null +++ b/temporary_uploads/10.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a826bbe9ef9fd5bfa6064d517c0c4bfb851699db8577aa55342fe90efd4d10e7 +size 13774198 diff --git a/temporary_uploads/11.mp4 b/temporary_uploads/11.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..06f8c89efb3866c624b86751fcb5d59137c9a50c --- /dev/null +++ b/temporary_uploads/11.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:311934836f75574e812dc41ce76591f021e91550c620214989b79e6e142afe39 +size 12821341 diff --git a/temporary_uploads/12.mp4 b/temporary_uploads/12.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..b1bde2c74e5e82259d53dea5c3d0e4a555f9af5d --- /dev/null +++ b/temporary_uploads/12.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc15a6c87991b2b3bad380e9a22179c10bc0928961ae69f01805fd02a09b33fb +size 4417086 diff --git a/temporary_uploads/13.mp4 b/temporary_uploads/13.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..a85d03d5925bbbf7006120bd174fc9851a6ce5dc --- /dev/null +++ b/temporary_uploads/13.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9150b291f38d04dffc5440e1732f21318338b8b385ed93eaea9b56aa843cb1b +size 2871413 diff --git a/temporary_uploads/14.mp4 b/temporary_uploads/14.mp4 new file mode 100644 index 
0000000000000000000000000000000000000000..f0129dca80d0046a39bb2daa467652ef027d63f6 --- /dev/null +++ b/temporary_uploads/14.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02650b0b36365220f232497e7d534f4a04979654e4bedb09295f750cd35e3147 +size 6351275 diff --git a/temporary_uploads/15.mp4 b/temporary_uploads/15.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..baab337eff76d2ebdf8d9a2144c5c09aadd4daff --- /dev/null +++ b/temporary_uploads/15.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6dd0bb616575c9ee3f51651c9343c8a41c2d73bb33aa6182fad96375aa2a289c +size 9022185 diff --git a/temporary_uploads/16.mp4 b/temporary_uploads/16.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..e0b28dc84dceb7f2419a313e928367a642f4fbd8 --- /dev/null +++ b/temporary_uploads/16.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a7bd6ddc2b9640d0cec9a46c2405ce6fe3496fff8e9746cee5a06d92a0351a8 +size 1594594 diff --git a/temporary_uploads/17.mp4 b/temporary_uploads/17.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..71fcfc64827db290b4a82f5f1d456b77c8c85ace --- /dev/null +++ b/temporary_uploads/17.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5903a58ba48b14ae335686d65382155cf52d685d9d8b22f8eebcd5ce89a84513 +size 8583185 diff --git a/temporary_uploads/18.mp4 b/temporary_uploads/18.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..18703d094505b6fdeb393e9a41964dab997b3e92 --- /dev/null +++ b/temporary_uploads/18.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a30171ee97e016b89b8f52ccc11872fbbd7acac95088d0fdcc21f5cc2853bc2c +size 4493717 diff --git a/temporary_uploads/19.mp4 b/temporary_uploads/19.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..a5f80e64cfaf1f77b9c485edf325c41b446f8677 --- /dev/null +++ b/temporary_uploads/19.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81a73c594d21071c11b902979f597a1c910dec26b565cfa0641fd16b90f0590f +size 19825821 diff --git a/temporary_uploads/20.mp4 b/temporary_uploads/20.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..516dc470c289798108eb9d579c545b956a0a815f --- /dev/null +++ b/temporary_uploads/20.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de3e3f50201bc119cf659aedab31fbc59b6a734cdcf84563939516b4f7170ae1 +size 3318196 diff --git a/temporary_uploads/21.mp4 b/temporary_uploads/21.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..ece55d12f0c4a5e9f7efcd9956b58f980f42eead --- /dev/null +++ b/temporary_uploads/21.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcb2d2b22305aa3617d1ce196dfe9112abe4f0ba238bc88e7b27965f7504c390 +size 5656476 diff --git a/temporary_uploads/22.mp4 b/temporary_uploads/22.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..b9a7f1cded48495c186d2422e7e829500a4b9e66 --- /dev/null +++ b/temporary_uploads/22.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7399d1c0ecfbfc7de160c5d221b78d89dfffccf5e88747a6abc44df03a6b0ad1 +size 1519075 diff --git a/temporary_uploads/23.mp4 b/temporary_uploads/23.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..e1e9097ec044fda6f3411878f688166240709ebe --- /dev/null +++ b/temporary_uploads/23.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:fbf07a5151a000a888181f95b772f7e77895f549171dcca9f1d017b9d0918642 +size 4944295 diff --git a/temporary_uploads/24.mp4 b/temporary_uploads/24.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..690cedcdd7ba774457c50af75930cd98dc63c5cb --- /dev/null +++ b/temporary_uploads/24.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9d19eaa07c56633008fe2c6a8a2c983e76b42220a87c8d74dc717ef322ae65e +size 959905 diff --git a/temporary_uploads/25.mp4 b/temporary_uploads/25.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..f9b57967e482796b9278bcf6b28f3ed71021f2c5 --- /dev/null +++ b/temporary_uploads/25.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0192cab149d541a7f74b731296a95a5f2518c7509b8280802a0e9680dca06419 +size 1587171 diff --git a/temporary_uploads/26.mp4 b/temporary_uploads/26.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..8c708019844cc31d9164e9ce69e5e751f6c11ab5 --- /dev/null +++ b/temporary_uploads/26.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb2369ee0cbafc47ddc7686936900f46ff4d34703c126e07703e8a8a90b5c692 +size 839369 diff --git a/temporary_uploads/27.mp4 b/temporary_uploads/27.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..1527c3a0231e4b0a745e4a001867316cde97b512 --- /dev/null +++ b/temporary_uploads/27.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aeb78d4b75f12b3de47ea6eb3261fb779996c1a4fd9d25fa01197c5488dceeef +size 4407540 diff --git a/temporary_uploads/28.mp4 b/temporary_uploads/28.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..65c0852794e1c49b9b00ca6dab13a1550237327c --- /dev/null +++ b/temporary_uploads/28.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64dce12dfa07d38c4d04440a4216269d31bf727cd9fb95ee2aea79b6973b3d62 +size 7621275 diff --git a/temporary_uploads/29.mp4 b/temporary_uploads/29.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..96b8f66bd1044bbb63499dd1d99391f43e75880e --- /dev/null +++ b/temporary_uploads/29.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46c9ad4d59dd50e373b96458553618e7370785b48e2c00a011b52968311c43bf +size 14509704 diff --git a/temporary_uploads/30.mp4 b/temporary_uploads/30.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..0d80b9de02b125169fa94f5b762f4d5e9fc48e0d --- /dev/null +++ b/temporary_uploads/30.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91c79a15fa1c7e7cf2c6145ea8d1693f144dedcfcc0931e3ba07fa7e8cb5c3d8 +size 12327997 diff --git a/temporary_uploads/31.mp4 b/temporary_uploads/31.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..7615840609edfca53908b9f734fde4060c4a4891 --- /dev/null +++ b/temporary_uploads/31.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:135fdd6b27b4815c67942391dbc3682669fd4f3c603263f9e58e8e4f14a24084 +size 5777279 diff --git a/temporary_uploads/32.mp4 b/temporary_uploads/32.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..f2c51a539bb3e7102926132e4a167a23e6c49ed2 --- /dev/null +++ b/temporary_uploads/32.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:903c5b6a15ef492d9f09ba3bd2c776bbcd071280cf0fbe40807b8e0122d9e31e +size 19692437 diff --git a/temporary_uploads/33.mp4 b/temporary_uploads/33.mp4 new file mode 100644 index 
0000000000000000000000000000000000000000..fda89e18f48fad7e2953aeb6753889547a6cf83b --- /dev/null +++ b/temporary_uploads/33.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bbe568c2921d04a62347fd49ec7b3a49569999caacfbb3dd8b7ffa89574ae08 +size 11676309 diff --git a/temporary_uploads/34.mp4 b/temporary_uploads/34.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..2f418eda80e34b141584cddabe30d39f5d89d4bf --- /dev/null +++ b/temporary_uploads/34.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c2716a0f993f4f86d5435df6071f10e512a9412f7e3250933254f7f5b034748 +size 9072592 diff --git a/temporary_uploads/35.mp4 b/temporary_uploads/35.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..f65ee80f158dbd6606761a8ea0429ed2faa5efe7 --- /dev/null +++ b/temporary_uploads/35.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb400c187c9a8def2d88faddd7b0b8ee47604c19a824a42fea22ad6b6cecb983 +size 2700713 diff --git a/temporary_uploads/36.mp4 b/temporary_uploads/36.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..5dd0f341f2354bdead4e22f533690b487538912f --- /dev/null +++ b/temporary_uploads/36.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ceca619bb28d11339b10d9213d024486fad10bad7504a61d6f6aa8c38a060d62 +size 2412048 diff --git a/temporary_uploads/37.mp4 b/temporary_uploads/37.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..1df43bd72cd80cb7284ff2e2ed43fcfb346ef04b --- /dev/null +++ b/temporary_uploads/37.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78f8eafa868f0c45661196bf4cfce44fe6453e37c46dd985c606713d059c4418 +size 727556 diff --git a/temporary_uploads/38.mp4 b/temporary_uploads/38.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..4c758527539c518d9a913e41264aa3bc3395f470 --- /dev/null +++ b/temporary_uploads/38.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab5f78eba99e9ce66da9aa48d6e2bf49cc6855c3deaa7b467e5b97806e00e526 +size 15636325 diff --git a/temporary_uploads/39.mp4 b/temporary_uploads/39.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..18c70b35c7447a5e1975f24180eadfafb8ea3746 --- /dev/null +++ b/temporary_uploads/39.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19e6d09e98e145b9a6bf60f42f61c7c299e4aa202cde19f93eb0ee15b9d3a04e +size 2890293 diff --git a/temporary_uploads/40.mp4 b/temporary_uploads/40.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..11015f75467c5ad82173570b6564dff4712a811c --- /dev/null +++ b/temporary_uploads/40.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e63c994349b73089e3986c2fad5a998a20230e93ea8b8353a2b2407574cc3fa5 +size 522480 diff --git a/temporary_uploads/41.mp4 b/temporary_uploads/41.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..1721b2cd61cbd17252271d530910af5aa9222ee0 --- /dev/null +++ b/temporary_uploads/41.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b744daab943541733de5293284b5accc97855bddcb5bd157f32271fd94c98c5 +size 512357 diff --git a/temporary_uploads/42.mp4 b/temporary_uploads/42.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..8479a434d10e5c8e39d010a5df4d85860eabe43e --- /dev/null +++ b/temporary_uploads/42.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:922f7b881c7b17d40177775dfa4d16c64287e68264b62578fd18834175f1aaca +size 2953257 diff --git a/temporary_uploads/43.mp4 b/temporary_uploads/43.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..1d125d10445ec8719091212672f19090cdc65453 --- /dev/null +++ b/temporary_uploads/43.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad1749319f21293d29a94a6c1377b255cbd25382b6a5304251c7cf1a0c87483c +size 13226234 diff --git a/temporary_uploads/44.mp4 b/temporary_uploads/44.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..efb694e94088daaae2fdd8b7327fd76edf7b62dd --- /dev/null +++ b/temporary_uploads/44.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3cf9ec81f6121dff557c13bb3f918cd0c5143d69d46126427c8faa404084b9d9 +size 4331917 diff --git a/temporary_uploads/45.mp4 b/temporary_uploads/45.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..6f7c1267a3ba3542b2b2b3f9840572db6adc5706 --- /dev/null +++ b/temporary_uploads/45.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2cd4147a56923dd38b168f38e0e548db825281bb0d2e580b338247bc5921c46 +size 4740606 diff --git a/temporary_uploads/46.mp4 b/temporary_uploads/46.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..2e0f284a7dd7fc706a9160d14fe0dee65e500661 --- /dev/null +++ b/temporary_uploads/46.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bf420c7ef9930e4d3f65fdcdd2db620c3619f1175046028f4fdc9dd12602389 +size 3916385 diff --git a/temporary_uploads/47.mp4 b/temporary_uploads/47.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..c6c662f1cffbd3357ea7d988f386b4422cf1ed09 --- /dev/null +++ b/temporary_uploads/47.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ff2b3de34fdd782d052aeafc12e0a5faed723a07021f8b34293c8855c526d14 +size 2361196 diff --git a/temporary_uploads/48.mp4 b/temporary_uploads/48.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..a00cf2b0f4484d37331974f05c49a644742098d1 --- /dev/null +++ b/temporary_uploads/48.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40278942ebd7f8dab05d1b8648f927a96dff984cd7526a32fcf8f44a050ace8c +size 10944928 diff --git a/temporary_uploads/49.mp4 b/temporary_uploads/49.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..75f0ebd5ee5101befceef11b4bc8078f206a7927 --- /dev/null +++ b/temporary_uploads/49.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:275a4e459306708ee6567fa1e0d20c82befe9b80e2a10f6239a37be7f40dc3b0 +size 1410057 diff --git a/temporary_uploads/50.mp4 b/temporary_uploads/50.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..edfe94f684689bde66dd8c9c0f5cde1da8343490 --- /dev/null +++ b/temporary_uploads/50.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e30f290aa2de154be2149ad7db414499d9dc80def4681e2f07a0e0f13ecd44a +size 16437193 diff --git a/temporary_uploads/51.mp4 b/temporary_uploads/51.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..3c04f49a0deb5bf2d8b5621979d7b3669ad32052 --- /dev/null +++ b/temporary_uploads/51.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81e10ffbb558a9a906a72f36321132b5373f7f9f3c07c28abca5ccc00e3808ff +size 5204822 diff --git a/temporary_uploads/52.mp4 b/temporary_uploads/52.mp4 new file mode 100644 index 
0000000000000000000000000000000000000000..e5853f37223289a548dc7c74fb0a246adcfd5174 --- /dev/null +++ b/temporary_uploads/52.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:380fab33ccfd5b16061fa87d47d756c7a9e2432eaf1f0b2d6cfcf727bd3e296f +size 4725365 diff --git a/temporary_uploads/53.mp4 b/temporary_uploads/53.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..deeab8196d523e593b1ed63496e9c7c68c4af963 --- /dev/null +++ b/temporary_uploads/53.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a818ef7e2d22544c36ae54c4ba090b47dd603c0fd01581a685654dbbb5964313 +size 2148317 diff --git a/temporary_uploads/54.mp4 b/temporary_uploads/54.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..e0758d42e605aa73546cd2cc70bcb1885c0adb6d --- /dev/null +++ b/temporary_uploads/54.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8cc12643ccde1fec2a3f11d86db1dc6b09b45274d45e05b0371b7e678f79a9e +size 904969 diff --git a/temporary_uploads/55.mp4 b/temporary_uploads/55.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..aeed97e7b77e66eed42719ce59bb6d80fe34fe7f --- /dev/null +++ b/temporary_uploads/55.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee6dfd913920220284f7f4a8f92d3d9fd47ad2a37116cb0b7528bbc36e76e01b +size 1416282 diff --git a/temporary_uploads/56.mp4 b/temporary_uploads/56.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..2068bcd9d8f3dec92965454dc860e16e59703a5d --- /dev/null +++ b/temporary_uploads/56.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c35c4b6b35782c69a271a8c3e57d6ce5a1f7f97b2eb9f727016eb9e2c8914ec0 +size 13037015 diff --git a/temporary_uploads/57.mp4 b/temporary_uploads/57.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..08a936dd0dac02a45bc24da9e5f23b3e81263caf --- /dev/null +++ b/temporary_uploads/57.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af28dd24d3aeb302ade1d7a5418c3dbb4be6290f1a72138e3b6bfb338137f9bf +size 2323974 diff --git a/temporary_uploads/58.mp4 b/temporary_uploads/58.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..dbbc14371c695d46ed6d1b174440452ab9808944 --- /dev/null +++ b/temporary_uploads/58.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a6950c9aa80f79843d89f267e1ab3205b895ab8c90c70da2c70900d4dcb0e25 +size 7383048 diff --git a/temporary_uploads/59.mp4 b/temporary_uploads/59.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..f848fd01f20148b7a0d5906e1efda08b017e4ac0 --- /dev/null +++ b/temporary_uploads/59.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8133dc794360ddbbfbbf8a16722d2b2b66af2811a524285da815abcb4e379971 +size 17222679 diff --git a/temporary_uploads/60.mp4 b/temporary_uploads/60.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..8d01a94ebd4b879485c34cc7df1fba9d7b051bbb --- /dev/null +++ b/temporary_uploads/60.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d41121bcd3fb2cff68c5b3362793132ed7c94270051a80db1fc5cd4f915e63a9 +size 543869 diff --git a/temporary_uploads/61.mp4 b/temporary_uploads/61.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..3f491e552ca9784a05ee259b308109cec8b6436c --- /dev/null +++ b/temporary_uploads/61.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:52a9836a1656097db08de3cfd2072ceec041231c16cebb7563f70e57b67a7799 +size 2199785 diff --git a/temporary_uploads/62.mp4 b/temporary_uploads/62.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..5601541ef914861dbcb3a5eb5542a2659848881f --- /dev/null +++ b/temporary_uploads/62.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47ddf9d75e1e4c4e70875455c2d42414fd90621416093bba4306ea0091cf6df6 +size 4152861 diff --git a/temporary_uploads/63.mp4 b/temporary_uploads/63.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..bda42f5e6f032ff3d94118a856457a3c12f5078d --- /dev/null +++ b/temporary_uploads/63.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65e9d8a52ac5e1546f9cb6fff16961a5af1780c5308f5855032a503963333627 +size 470431 diff --git a/temporary_uploads/64.mp4 b/temporary_uploads/64.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..7d13e95c27c6a088114e21a7dbfc58c8d398c2d2 --- /dev/null +++ b/temporary_uploads/64.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78a6a456a12bec468caea5e8004cc7dbef08431b864c079fd7d860894a73e0bd +size 7231755 diff --git a/temporary_uploads/65.mp4 b/temporary_uploads/65.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..c2ef67a41b91188ec569820d4433d00bf4cf397a --- /dev/null +++ b/temporary_uploads/65.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8598127049dde33c199a2ea8e65adc4560d608ec6452890702c450e8b8bdbb8 +size 4133135 diff --git a/temporary_uploads/66.mp4 b/temporary_uploads/66.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..8260c64f909ef890801251b1627898c348489b95 --- /dev/null +++ b/temporary_uploads/66.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a91c95b25a7df6167f8edf7b7f3e4e6c8559c612ad8876fc4888782d95bbce1b +size 8354184 diff --git a/temporary_uploads/67.mp4 b/temporary_uploads/67.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..a264581b2e7c8eb3cf1bf095b657d56ca20537fc --- /dev/null +++ b/temporary_uploads/67.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a8f0d55b50541c97ce8065ae656dbb02892857f727c01463004845c33a87c60 +size 13824741 diff --git a/temporary_uploads/68.mp4 b/temporary_uploads/68.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..7e2a3fd070115ad116f6bc755e3be8a70ee91175 --- /dev/null +++ b/temporary_uploads/68.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb239545b8572725f5b9d5dea541f31d5047d419b175c88b7b35a76d1f5a8922 +size 11514655 diff --git a/temporary_uploads/69.mp4 b/temporary_uploads/69.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..e2f0ed25ace87aa0107fe69f1a703b2ccf6a7939 --- /dev/null +++ b/temporary_uploads/69.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:990f30c2014bba7a463812954225526660b5755eddbfb74aaba6f83c08cda0b0 +size 7563120 diff --git a/temporary_uploads/70.mp4 b/temporary_uploads/70.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..05f9fd3f59b65715d5738ea4e664b21b116081b0 --- /dev/null +++ b/temporary_uploads/70.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6682f88b0b8454a667d665572ae987c9cfe7049bf2fcf0025dfaca83f3e209b1 +size 8875194 diff --git a/temporary_uploads/71.mp4 b/temporary_uploads/71.mp4 new file mode 100644 index 
0000000000000000000000000000000000000000..23717b8d9609587f2111190a9de8f12cb20e386b --- /dev/null +++ b/temporary_uploads/71.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2eb4d8e25aafb45f1043590f331a57da537701000b5e03191b14995b1bbc0301 +size 3540367 diff --git a/temporary_uploads/72.mp4 b/temporary_uploads/72.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..8455b5c1c8dae888cfb9f3553a19cabee66c943e --- /dev/null +++ b/temporary_uploads/72.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98dc9e540c72e5dd957a152726e3e1e24e0ae8f0d36ccb8b83c18f61b4621bb2 +size 5961879