Spaces:
Sleeping
Sleeping
aakash0017
commited on
Commit
·
676f0c5
1
Parent(s):
3958245
Upload folder using huggingface_hub
Browse files- .DS_Store +0 -0
- .ipynb_checkpoints/Spark_Deployment-checkpoint.ipynb +693 -0
- Dockerfile +20 -0
- README.md +3 -9
- Spark_Deployment.ipynb +703 -0
- Spark_Deployment_Final.ipynb +0 -0
- YOLOv8 Best We.pt +3 -0
- __MACOSX/._example 3 copy.mp4 +0 -0
- __MACOSX/._example 3.mp4 +0 -0
- app.py +277 -0
- requirements.txt +134 -0
- test.zip +3 -0
- zip_file_with_email.py +57 -0
.DS_Store
ADDED
Binary file (6.15 kB). View file
|
|
.ipynb_checkpoints/Spark_Deployment-checkpoint.ipynb
ADDED
@@ -0,0 +1,693 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"nbformat": 4,
|
3 |
+
"nbformat_minor": 0,
|
4 |
+
"metadata": {
|
5 |
+
"colab": {
|
6 |
+
"provenance": [],
|
7 |
+
"gpuType": "T4"
|
8 |
+
},
|
9 |
+
"kernelspec": {
|
10 |
+
"name": "python3",
|
11 |
+
"display_name": "Python 3"
|
12 |
+
},
|
13 |
+
"language_info": {
|
14 |
+
"name": "python"
|
15 |
+
},
|
16 |
+
"accelerator": "GPU"
|
17 |
+
},
|
18 |
+
"cells": [
|
19 |
+
{
|
20 |
+
"cell_type": "code",
|
21 |
+
"execution_count": 1,
|
22 |
+
"metadata": {
|
23 |
+
"colab": {
|
24 |
+
"base_uri": "https://localhost:8080/"
|
25 |
+
},
|
26 |
+
"id": "mM4DvqTJGzwt",
|
27 |
+
"outputId": "f9e04f60-2edd-4c55-962b-b18f5dd37001"
|
28 |
+
},
|
29 |
+
"outputs": [
|
30 |
+
{
|
31 |
+
"output_type": "stream",
|
32 |
+
"name": "stdout",
|
33 |
+
"text": [
|
34 |
+
"Mounted at /content/drive\n"
|
35 |
+
]
|
36 |
+
}
|
37 |
+
],
|
38 |
+
"source": [
|
39 |
+
"from google.colab import drive\n",
|
40 |
+
"drive.mount('/content/drive')"
|
41 |
+
]
|
42 |
+
},
|
43 |
+
{
|
44 |
+
"cell_type": "code",
|
45 |
+
"source": [
|
46 |
+
"!sudo apt update && sudo apt install ffmpeg"
|
47 |
+
],
|
48 |
+
"metadata": {
|
49 |
+
"id": "rQkjmqCqRlVs"
|
50 |
+
},
|
51 |
+
"execution_count": null,
|
52 |
+
"outputs": []
|
53 |
+
},
|
54 |
+
{
|
55 |
+
"cell_type": "code",
|
56 |
+
"source": [
|
57 |
+
"!git clone https://github.com/k-sashank/ml-nlgma-body-cam.git"
|
58 |
+
],
|
59 |
+
"metadata": {
|
60 |
+
"colab": {
|
61 |
+
"base_uri": "https://localhost:8080/"
|
62 |
+
},
|
63 |
+
"id": "6qpSdD_WSiC3",
|
64 |
+
"outputId": "bd8400dc-244a-4529-902e-8838775f431c"
|
65 |
+
},
|
66 |
+
"execution_count": 30,
|
67 |
+
"outputs": [
|
68 |
+
{
|
69 |
+
"output_type": "stream",
|
70 |
+
"name": "stdout",
|
71 |
+
"text": [
|
72 |
+
"Cloning into 'ml-nlgma-body-cam'...\n",
|
73 |
+
"remote: Enumerating objects: 395, done.\u001b[K\n",
|
74 |
+
"remote: Counting objects: 100% (292/292), done.\u001b[K\n",
|
75 |
+
"remote: Compressing objects: 100% (239/239), done.\u001b[K\n",
|
76 |
+
"remote: Total 395 (delta 77), reused 224 (delta 47), pack-reused 103\u001b[K\n",
|
77 |
+
"Receiving objects: 100% (395/395), 9.15 MiB | 6.64 MiB/s, done.\n",
|
78 |
+
"Resolving deltas: 100% (117/117), done.\n"
|
79 |
+
]
|
80 |
+
}
|
81 |
+
]
|
82 |
+
},
|
83 |
+
{
|
84 |
+
"cell_type": "code",
|
85 |
+
"source": [
|
86 |
+
"cd ml-nlgma-body-cam/deployment/"
|
87 |
+
],
|
88 |
+
"metadata": {
|
89 |
+
"colab": {
|
90 |
+
"base_uri": "https://localhost:8080/"
|
91 |
+
},
|
92 |
+
"id": "S1z-orpESvt7",
|
93 |
+
"outputId": "5a2fdae3-455a-4234-e4cb-da7578ca8a6c"
|
94 |
+
},
|
95 |
+
"execution_count": 34,
|
96 |
+
"outputs": [
|
97 |
+
{
|
98 |
+
"output_type": "stream",
|
99 |
+
"name": "stdout",
|
100 |
+
"text": [
|
101 |
+
"/content/ml-nlgma-body-cam/deployment\n"
|
102 |
+
]
|
103 |
+
}
|
104 |
+
]
|
105 |
+
},
|
106 |
+
{
|
107 |
+
"cell_type": "code",
|
108 |
+
"source": [
|
109 |
+
"!pip install -r requirements.txt"
|
110 |
+
],
|
111 |
+
"metadata": {
|
112 |
+
"id": "byk_2SlPsWAZ",
|
113 |
+
"colab": {
|
114 |
+
"base_uri": "https://localhost:8080/"
|
115 |
+
},
|
116 |
+
"outputId": "6286e21b-41ce-40cc-861b-022f6275f4a5"
|
117 |
+
},
|
118 |
+
"execution_count": 29,
|
119 |
+
"outputs": [
|
120 |
+
{
|
121 |
+
"output_type": "stream",
|
122 |
+
"name": "stdout",
|
123 |
+
"text": [
|
124 |
+
"Requirement already satisfied: opencv-python in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 1)) (4.8.0.76)\n",
|
125 |
+
"Requirement already satisfied: ultralytics in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 2)) (8.0.219)\n",
|
126 |
+
"Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 3)) (2.1.0+cu118)\n",
|
127 |
+
"Requirement already satisfied: transformers in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 4)) (4.35.2)\n",
|
128 |
+
"Requirement already satisfied: Cython in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 5)) (3.0.5)\n",
|
129 |
+
"Requirement already satisfied: wget in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 6)) (3.2)\n",
|
130 |
+
"Requirement already satisfied: accelerate in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 7)) (0.24.1)\n",
|
131 |
+
"Requirement already satisfied: openai in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 8)) (1.3.5)\n",
|
132 |
+
"Requirement already satisfied: openai-whisper in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 9)) (20231117)\n",
|
133 |
+
"Requirement already satisfied: numpy>=1.21.2 in /usr/local/lib/python3.10/dist-packages (from opencv-python->-r requirements.txt (line 1)) (1.23.5)\n",
|
134 |
+
"Requirement already satisfied: matplotlib>=3.3.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (3.7.1)\n",
|
135 |
+
"Requirement already satisfied: pillow>=7.1.2 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (9.4.0)\n",
|
136 |
+
"Requirement already satisfied: pyyaml>=5.3.1 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (6.0.1)\n",
|
137 |
+
"Requirement already satisfied: requests>=2.23.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (2.31.0)\n",
|
138 |
+
"Requirement already satisfied: scipy>=1.4.1 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (1.11.3)\n",
|
139 |
+
"Requirement already satisfied: torchvision>=0.9.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (0.16.0+cu118)\n",
|
140 |
+
"Requirement already satisfied: tqdm>=4.64.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (4.66.1)\n",
|
141 |
+
"Requirement already satisfied: pandas>=1.1.4 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (1.5.3)\n",
|
142 |
+
"Requirement already satisfied: seaborn>=0.11.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (0.12.2)\n",
|
143 |
+
"Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (5.9.5)\n",
|
144 |
+
"Requirement already satisfied: py-cpuinfo in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (9.0.0)\n",
|
145 |
+
"Requirement already satisfied: thop>=0.1.1 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (0.1.1.post2209072238)\n",
|
146 |
+
"Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch->-r requirements.txt (line 3)) (3.13.1)\n",
|
147 |
+
"Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch->-r requirements.txt (line 3)) (4.8.0)\n",
|
148 |
+
"Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch->-r requirements.txt (line 3)) (1.12)\n",
|
149 |
+
"Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch->-r requirements.txt (line 3)) (3.2.1)\n",
|
150 |
+
"Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch->-r requirements.txt (line 3)) (3.1.2)\n",
|
151 |
+
"Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch->-r requirements.txt (line 3)) (2023.6.0)\n",
|
152 |
+
"Requirement already satisfied: triton==2.1.0 in /usr/local/lib/python3.10/dist-packages (from torch->-r requirements.txt (line 3)) (2.1.0)\n",
|
153 |
+
"Requirement already satisfied: huggingface-hub<1.0,>=0.16.4 in /usr/local/lib/python3.10/dist-packages (from transformers->-r requirements.txt (line 4)) (0.19.4)\n",
|
154 |
+
"Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers->-r requirements.txt (line 4)) (23.2)\n",
|
155 |
+
"Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers->-r requirements.txt (line 4)) (2023.6.3)\n",
|
156 |
+
"Requirement already satisfied: tokenizers<0.19,>=0.14 in /usr/local/lib/python3.10/dist-packages (from transformers->-r requirements.txt (line 4)) (0.15.0)\n",
|
157 |
+
"Requirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.10/dist-packages (from transformers->-r requirements.txt (line 4)) (0.4.0)\n",
|
158 |
+
"Requirement already satisfied: anyio<4,>=3.5.0 in /usr/local/lib/python3.10/dist-packages (from openai->-r requirements.txt (line 8)) (3.7.1)\n",
|
159 |
+
"Requirement already satisfied: distro<2,>=1.7.0 in /usr/lib/python3/dist-packages (from openai->-r requirements.txt (line 8)) (1.7.0)\n",
|
160 |
+
"Requirement already satisfied: httpx<1,>=0.23.0 in /usr/local/lib/python3.10/dist-packages (from openai->-r requirements.txt (line 8)) (0.25.2)\n",
|
161 |
+
"Requirement already satisfied: pydantic<3,>=1.9.0 in /usr/local/lib/python3.10/dist-packages (from openai->-r requirements.txt (line 8)) (2.5.2)\n",
|
162 |
+
"Requirement already satisfied: numba in /usr/local/lib/python3.10/dist-packages (from openai-whisper->-r requirements.txt (line 9)) (0.58.1)\n",
|
163 |
+
"Requirement already satisfied: more-itertools in /usr/local/lib/python3.10/dist-packages (from openai-whisper->-r requirements.txt (line 9)) (10.1.0)\n",
|
164 |
+
"Requirement already satisfied: tiktoken in /usr/local/lib/python3.10/dist-packages (from openai-whisper->-r requirements.txt (line 9)) (0.5.1)\n",
|
165 |
+
"Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.10/dist-packages (from anyio<4,>=3.5.0->openai->-r requirements.txt (line 8)) (3.4)\n",
|
166 |
+
"Requirement already satisfied: sniffio>=1.1 in /usr/local/lib/python3.10/dist-packages (from anyio<4,>=3.5.0->openai->-r requirements.txt (line 8)) (1.3.0)\n",
|
167 |
+
"Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio<4,>=3.5.0->openai->-r requirements.txt (line 8)) (1.1.3)\n",
|
168 |
+
"Requirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from httpx<1,>=0.23.0->openai->-r requirements.txt (line 8)) (2023.7.22)\n",
|
169 |
+
"Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.10/dist-packages (from httpx<1,>=0.23.0->openai->-r requirements.txt (line 8)) (1.0.2)\n",
|
170 |
+
"Requirement already satisfied: h11<0.15,>=0.13 in /usr/local/lib/python3.10/dist-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai->-r requirements.txt (line 8)) (0.14.0)\n",
|
171 |
+
"Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics->-r requirements.txt (line 2)) (1.2.0)\n",
|
172 |
+
"Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics->-r requirements.txt (line 2)) (0.12.1)\n",
|
173 |
+
"Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics->-r requirements.txt (line 2)) (4.44.3)\n",
|
174 |
+
"Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics->-r requirements.txt (line 2)) (1.4.5)\n",
|
175 |
+
"Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics->-r requirements.txt (line 2)) (3.1.1)\n",
|
176 |
+
"Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics->-r requirements.txt (line 2)) (2.8.2)\n",
|
177 |
+
"Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas>=1.1.4->ultralytics->-r requirements.txt (line 2)) (2023.3.post1)\n",
|
178 |
+
"Requirement already satisfied: annotated-types>=0.4.0 in /usr/local/lib/python3.10/dist-packages (from pydantic<3,>=1.9.0->openai->-r requirements.txt (line 8)) (0.6.0)\n",
|
179 |
+
"Requirement already satisfied: pydantic-core==2.14.5 in /usr/local/lib/python3.10/dist-packages (from pydantic<3,>=1.9.0->openai->-r requirements.txt (line 8)) (2.14.5)\n",
|
180 |
+
"Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.23.0->ultralytics->-r requirements.txt (line 2)) (3.3.2)\n",
|
181 |
+
"Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.23.0->ultralytics->-r requirements.txt (line 2)) (2.0.7)\n",
|
182 |
+
"Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch->-r requirements.txt (line 3)) (2.1.3)\n",
|
183 |
+
"Requirement already satisfied: llvmlite<0.42,>=0.41.0dev0 in /usr/local/lib/python3.10/dist-packages (from numba->openai-whisper->-r requirements.txt (line 9)) (0.41.1)\n",
|
184 |
+
"Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch->-r requirements.txt (line 3)) (1.3.0)\n",
|
185 |
+
"Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib>=3.3.0->ultralytics->-r requirements.txt (line 2)) (1.16.0)\n"
|
186 |
+
]
|
187 |
+
}
|
188 |
+
]
|
189 |
+
},
|
190 |
+
{
|
191 |
+
"cell_type": "code",
|
192 |
+
"source": [
|
193 |
+
"import whisper\n",
|
194 |
+
"import cv2\n",
|
195 |
+
"import os\n",
|
196 |
+
"import urllib.request\n",
|
197 |
+
"from PIL import Image\n",
|
198 |
+
"from ultralytics import YOLO\n",
|
199 |
+
"import torch\n",
|
200 |
+
"import matplotlib.pyplot as plt\n",
|
201 |
+
"from tqdm import tqdm\n",
|
202 |
+
"from transformers import pipeline\n",
|
203 |
+
"import moviepy.editor as mp\n",
|
204 |
+
"import json\n",
|
205 |
+
"import re\n",
|
206 |
+
"import gradio as gr\n",
|
207 |
+
"from openai import OpenAI"
|
208 |
+
],
|
209 |
+
"metadata": {
|
210 |
+
"id": "3ZrCX53_ssHy"
|
211 |
+
},
|
212 |
+
"execution_count": 8,
|
213 |
+
"outputs": []
|
214 |
+
},
|
215 |
+
{
|
216 |
+
"cell_type": "markdown",
|
217 |
+
"source": [
|
218 |
+
"# Full"
|
219 |
+
],
|
220 |
+
"metadata": {
|
221 |
+
"id": "lmWMJ_FeJDyD"
|
222 |
+
}
|
223 |
+
},
|
224 |
+
{
|
225 |
+
"cell_type": "code",
|
226 |
+
"source": [
|
227 |
+
"def video_transcription(video_path):\n",
|
228 |
+
" try:\n",
|
229 |
+
" model = whisper.load_model('large-v3')\n",
|
230 |
+
" transcript = model.transcribe(video_path, verbose = True)\n",
|
231 |
+
"\n",
|
232 |
+
" # JSON Dump (Find a way to not create a file and just dump into a variable or something)\n",
|
233 |
+
" #json_file_path = video_path.split('/')[-1][:-4]+ \".json\"\n",
|
234 |
+
" #with open(json_file_path, 'w') as json_file:\n",
|
235 |
+
" #json.dump(transcript, json_file, indent = 2)\n",
|
236 |
+
" #json_file_path = video_path.split('/')[-1][:-4]+ \".json\"\n",
|
237 |
+
" #with open(json_file_path, 'w') as json_file:\n",
|
238 |
+
" return json.dump(transcript)\n",
|
239 |
+
"\n",
|
240 |
+
" except Exception as e:\n",
|
241 |
+
" return e\n",
|
242 |
+
"\n",
|
243 |
+
"def action_detection(transcript, openai_key):\n",
|
244 |
+
" try:\n",
|
245 |
+
" # JSON Dump (Find a way to not create a file and just dump into a variable or something)\n",
|
246 |
+
" #with open(json_path, 'r') as f:\n",
|
247 |
+
" # transcript = json.load(f)\n",
|
248 |
+
" transcript_string = ''\n",
|
249 |
+
" for segments in transcript['segments']:\n",
|
250 |
+
" transcript_string+=str(segments['id'])+str(segments['text']+'\\n')\n",
|
251 |
+
"\n",
|
252 |
+
" client = OpenAI(api_key = openai_key)\n",
|
253 |
+
"\n",
|
254 |
+
" completion = client.chat.completions.create(\n",
|
255 |
+
" model=\"gpt-3.5-turbo-1106\",\n",
|
256 |
+
" messages=[\n",
|
257 |
+
" {\"role\": \"system\", \"content\": f\"Given this {transcript_string} You are an AI system specialized in detecting planning issues, critiquing plans, and analyzing conversations between people regarding how to disperse. Additionally, identify any instances suggesting 1st Amendment violations or officers expressing the belief that this protest was anti-police. Finally, flag any aggressive comments found in the audio transcript.\"},\n",
|
258 |
+
" {\"role\": \"user\", \"content\":\"Give responce like this following examples: Sentence: '18: What do you got?' Explanation: This sentence may indicate confusion or a need for clarification, as the speaker is asking for information. It could potentially be a planning issue if the speaker is seeking information to execute a specific task.\"}\n",
|
259 |
+
" ]\n",
|
260 |
+
" )\n",
|
261 |
+
"\n",
|
262 |
+
" output = completion.choices[0].message.content\n",
|
263 |
+
"\n",
|
264 |
+
" paragraphs = re.split(r'\\n\\n', output)\n",
|
265 |
+
"\n",
|
266 |
+
" sentences = []\n",
|
267 |
+
" explanations = []\n",
|
268 |
+
"\n",
|
269 |
+
" for paragraph in paragraphs:\n",
|
270 |
+
" sentence_match = re.search(r\"Sentence: '(.+)'\", paragraph)\n",
|
271 |
+
" explanation_match = re.search(r\"Explanation: (.+)\", paragraph)\n",
|
272 |
+
"\n",
|
273 |
+
" if sentence_match and explanation_match:\n",
|
274 |
+
" sentences.append(sentence_match.group(1).split(': ')[-1])\n",
|
275 |
+
" explanations.append(explanation_match.group(1))\n",
|
276 |
+
"\n",
|
277 |
+
" #for i in range(len(sentences)):\n",
|
278 |
+
" # print(f\"Sentence: '{sentences[i]}'\")\n",
|
279 |
+
" # print(f\"Explanation: {explanations[i]}\\n\")\n",
|
280 |
+
"\n",
|
281 |
+
" for sentence_to_search in sentences:\n",
|
282 |
+
" pattern = re.compile(re.escape(sentence_to_search), re.IGNORECASE)\n",
|
283 |
+
"\n",
|
284 |
+
" matching_entries = [entry for entry in transcript['segments'] if re.search(pattern, entry['text'])]\n",
|
285 |
+
"\n",
|
286 |
+
" sent_with_time = []\n",
|
287 |
+
" if matching_entries:\n",
|
288 |
+
" for entry in matching_entries:\n",
|
289 |
+
" sent_with_time.append(sentence_to_search + ' Start Time: ', entry['start'] + ' End Time: ', entry['end'])\n",
|
290 |
+
"\n",
|
291 |
+
" return sent_with_time\n",
|
292 |
+
"\n",
|
293 |
+
" except Exception as e:\n",
|
294 |
+
" return e\n",
|
295 |
+
"\n",
|
296 |
+
"def process_video(video_path, weights):\n",
|
297 |
+
" try:\n",
|
298 |
+
" # This code cell detects batons in the video\n",
|
299 |
+
" current_frame = 0\n",
|
300 |
+
" model = YOLO(weights)\n",
|
301 |
+
" cap = cv2.VideoCapture(video_path)\n",
|
302 |
+
" fps = int(cap.get(cv2.CAP_PROP_FPS))\n",
|
303 |
+
" conseq_frames = 0\n",
|
304 |
+
" start_time = \"\"\n",
|
305 |
+
" end_time = \"\"\n",
|
306 |
+
" res = []\n",
|
307 |
+
"\n",
|
308 |
+
" while True:\n",
|
309 |
+
" ret, frame = cap.read()\n",
|
310 |
+
" if not ret:\n",
|
311 |
+
" break\n",
|
312 |
+
"\n",
|
313 |
+
" # Detecting baton on one frame per second\n",
|
314 |
+
" if current_frame % fps == 0:\n",
|
315 |
+
" currect_sec = current_frame/fps\n",
|
316 |
+
"\n",
|
317 |
+
" # Model prediction on current frame\n",
|
318 |
+
" results = model(frame, verbose = False)\n",
|
319 |
+
" count = 0\n",
|
320 |
+
" classes = results[0].boxes.data\n",
|
321 |
+
"\n",
|
322 |
+
" # Formatting the time for printing\n",
|
323 |
+
" hours, remainder = divmod(currect_sec, 3600)\n",
|
324 |
+
" minutes, seconds = divmod(remainder, 60)\n",
|
325 |
+
" hours = str(int(hours)).zfill(2)\n",
|
326 |
+
" minutes = str(int(minutes)).zfill(2)\n",
|
327 |
+
" seconds = str(int(seconds)).zfill(2)\n",
|
328 |
+
"\n",
|
329 |
+
" for i in classes:\n",
|
330 |
+
"\n",
|
331 |
+
" # Checking if baton is detected (i.e. if the class corresponding to baton is 1 or not)\n",
|
332 |
+
" if float(i[5]) == 1:\n",
|
333 |
+
" count+=1\n",
|
334 |
+
"\n",
|
335 |
+
" # Marking the start_time if this is the first consecutive frame a baton is detected in\n",
|
336 |
+
" if count >= 1:\n",
|
337 |
+
" conseq_frames+=1\n",
|
338 |
+
" if conseq_frames == 1:\n",
|
339 |
+
" start_time = hours + \":\" + minutes + \":\" + seconds\n",
|
340 |
+
"\n",
|
341 |
+
" # Marking the end time if after one or multiple consecutive frames of detection, a baton is not detected\n",
|
342 |
+
" else:\n",
|
343 |
+
" if conseq_frames > 0:\n",
|
344 |
+
" conseq_frames = 0\n",
|
345 |
+
" end_time = hours + \":\" + minutes + \":\" + seconds\n",
|
346 |
+
"\n",
|
347 |
+
" # Printing time intervals in which baton was detected\n",
|
348 |
+
" res.append(start_time + \" to \" + end_time)\n",
|
349 |
+
" start_time = \"\"\n",
|
350 |
+
" end_time = \"\"\n",
|
351 |
+
"\n",
|
352 |
+
" current_frame += 1\n",
|
353 |
+
" cap.release()\n",
|
354 |
+
"\n",
|
355 |
+
" return \"\\n\".join(res)\n",
|
356 |
+
"\n",
|
357 |
+
" except Exception as e:\n",
|
358 |
+
"\n",
|
359 |
+
" return e\n",
|
360 |
+
"\n",
|
361 |
+
"def all_funcs(openai_key,video_path, yolo_weights):\n",
|
362 |
+
" transcript = video_transcription(video_path)\n",
|
363 |
+
" sentences = action_detection(json.loads(transcript), openai_key)\n",
|
364 |
+
" batons = process_video(video_path, yolo_weights)\n",
|
365 |
+
"\n",
|
366 |
+
" return sentences, batons"
|
367 |
+
],
|
368 |
+
"metadata": {
|
369 |
+
"id": "YMDeEOwLtGYN"
|
370 |
+
},
|
371 |
+
"execution_count": 16,
|
372 |
+
"outputs": []
|
373 |
+
},
|
374 |
+
{
|
375 |
+
"cell_type": "code",
|
376 |
+
"source": [
|
377 |
+
"btn = gr.Interface(\n",
|
378 |
+
" fn=all_funcs,\n",
|
379 |
+
" inputs=[\"text\",gr.Files(label=\"Select Video File\"), gr.Files(label=\"Select YOLOv8 Weights File\")],\n",
|
380 |
+
" outputs=[gr.Textbox(label=\"Audio analysis time stamps\",lines=20), gr.Textbox(label=\"Baton detection timestamps\",lines=20)]\n",
|
381 |
+
")\n",
|
382 |
+
"\n",
|
383 |
+
"btn.launch()\n"
|
384 |
+
],
|
385 |
+
"metadata": {
|
386 |
+
"colab": {
|
387 |
+
"base_uri": "https://localhost:8080/",
|
388 |
+
"height": 645
|
389 |
+
},
|
390 |
+
"id": "ZbZKUNl3Mttf",
|
391 |
+
"outputId": "bb7e6e43-1bce-4b35-da0e-49ef39093d16"
|
392 |
+
},
|
393 |
+
"execution_count": 24,
|
394 |
+
"outputs": [
|
395 |
+
{
|
396 |
+
"output_type": "stream",
|
397 |
+
"name": "stdout",
|
398 |
+
"text": [
|
399 |
+
"Setting queue=True in a Colab notebook requires sharing enabled. Setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
|
400 |
+
"\n",
|
401 |
+
"Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
|
402 |
+
"Running on public URL: https://868bba52379b375588.gradio.live\n",
|
403 |
+
"\n",
|
404 |
+
"This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
|
405 |
+
]
|
406 |
+
},
|
407 |
+
{
|
408 |
+
"output_type": "display_data",
|
409 |
+
"data": {
|
410 |
+
"text/plain": [
|
411 |
+
"<IPython.core.display.HTML object>"
|
412 |
+
],
|
413 |
+
"text/html": [
|
414 |
+
"<div><iframe src=\"https://868bba52379b375588.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
|
415 |
+
]
|
416 |
+
},
|
417 |
+
"metadata": {}
|
418 |
+
},
|
419 |
+
{
|
420 |
+
"output_type": "execute_result",
|
421 |
+
"data": {
|
422 |
+
"text/plain": []
|
423 |
+
},
|
424 |
+
"metadata": {},
|
425 |
+
"execution_count": 24
|
426 |
+
}
|
427 |
+
]
|
428 |
+
},
|
429 |
+
{
|
430 |
+
"cell_type": "markdown",
|
431 |
+
"source": [
|
432 |
+
"# Baton Detection"
|
433 |
+
],
|
434 |
+
"metadata": {
|
435 |
+
"id": "NMF48OxVJHLp"
|
436 |
+
}
|
437 |
+
},
|
438 |
+
{
|
439 |
+
"cell_type": "code",
|
440 |
+
"source": [
|
441 |
+
"def process_video(video_path, weights):\n",
|
442 |
+
" try:\n",
|
443 |
+
" # This code cell detects batons in the video\n",
|
444 |
+
" current_frame = 0\n",
|
445 |
+
" model = YOLO(weights)\n",
|
446 |
+
" cap = cv2.VideoCapture(video_path)\n",
|
447 |
+
" fps = int(cap.get(cv2.CAP_PROP_FPS))\n",
|
448 |
+
" conseq_frames = 0\n",
|
449 |
+
" start_time = \"\"\n",
|
450 |
+
" end_time = \"\"\n",
|
451 |
+
" res = []\n",
|
452 |
+
"\n",
|
453 |
+
" while True:\n",
|
454 |
+
" ret, frame = cap.read()\n",
|
455 |
+
" if not ret:\n",
|
456 |
+
" break\n",
|
457 |
+
"\n",
|
458 |
+
" # Detecting baton on one frame per second\n",
|
459 |
+
" if current_frame % fps == 0:\n",
|
460 |
+
" currect_sec = current_frame/fps\n",
|
461 |
+
"\n",
|
462 |
+
" # Model prediction on current frame\n",
|
463 |
+
" results = model(frame, verbose = False)\n",
|
464 |
+
" count = 0\n",
|
465 |
+
" classes = results[0].boxes.data\n",
|
466 |
+
"\n",
|
467 |
+
" # Formatting the time for printing\n",
|
468 |
+
" hours, remainder = divmod(currect_sec, 3600)\n",
|
469 |
+
" minutes, seconds = divmod(remainder, 60)\n",
|
470 |
+
" hours = str(int(hours)).zfill(2)\n",
|
471 |
+
" minutes = str(int(minutes)).zfill(2)\n",
|
472 |
+
" seconds = str(int(seconds)).zfill(2)\n",
|
473 |
+
"\n",
|
474 |
+
" for i in classes:\n",
|
475 |
+
"\n",
|
476 |
+
" # Checking if baton is detected (i.e. if the class corresponding to baton is 1 or not)\n",
|
477 |
+
" if float(i[5]) == 1:\n",
|
478 |
+
" count+=1\n",
|
479 |
+
"\n",
|
480 |
+
" # Marking the start_time if this is the first consecutive frame a baton is detected in\n",
|
481 |
+
" if count >= 1:\n",
|
482 |
+
" conseq_frames+=1\n",
|
483 |
+
" if conseq_frames == 1:\n",
|
484 |
+
" start_time = hours + \":\" + minutes + \":\" + seconds\n",
|
485 |
+
"\n",
|
486 |
+
" # Marking the end time if after one or multiple consecutive frames of detection, a baton is not detected\n",
|
487 |
+
" else:\n",
|
488 |
+
" if conseq_frames > 0:\n",
|
489 |
+
" conseq_frames = 0\n",
|
490 |
+
" end_time = hours + \":\" + minutes + \":\" + seconds\n",
|
491 |
+
"\n",
|
492 |
+
" # Printing time intervals in which baton was detected\n",
|
493 |
+
" res.append(start_time + \" to \" + end_time)\n",
|
494 |
+
" start_time = \"\"\n",
|
495 |
+
" end_time = \"\"\n",
|
496 |
+
"\n",
|
497 |
+
" current_frame += 1\n",
|
498 |
+
" cap.release()\n",
|
499 |
+
"\n",
|
500 |
+
" return \"\\n\".join(res)\n",
|
501 |
+
"\n",
|
502 |
+
" except Exception as e:\n",
|
503 |
+
"\n",
|
504 |
+
" return e"
|
505 |
+
],
|
506 |
+
"metadata": {
|
507 |
+
"id": "8VSlkVeNJQo4"
|
508 |
+
},
|
509 |
+
"execution_count": null,
|
510 |
+
"outputs": []
|
511 |
+
},
|
512 |
+
{
|
513 |
+
"cell_type": "code",
|
514 |
+
"source": [
|
515 |
+
"with gr.Blocks() as demo:\n",
|
516 |
+
"\n",
|
517 |
+
" video_path = gr.Textbox(label = \"Enter Path to Video\")\n",
|
518 |
+
" #openai_keys = gr.Textbox(label = \"Enter your OpenAI Key\")\n",
|
519 |
+
" weights = gr.Textbox(label = \"Enter Path to YOLOv8 Weights\")\n",
|
520 |
+
" #sentences = gr.Textbox(label = \"Sentences Detected\")\n",
|
521 |
+
" batons = gr.Textbox(label = \"Batons Detected\")\n",
|
522 |
+
" btn = gr.Button(value = \"Process Video\")\n",
|
523 |
+
" btn.click(process_video, inputs = [video_path, weights], outputs = batons)\n",
|
524 |
+
"\n",
|
525 |
+
"demo.launch()"
|
526 |
+
],
|
527 |
+
"metadata": {
|
528 |
+
"colab": {
|
529 |
+
"base_uri": "https://localhost:8080/",
|
530 |
+
"height": 626
|
531 |
+
},
|
532 |
+
"id": "uSBK_3VBJQmC",
|
533 |
+
"outputId": "30c43f6f-72c7-4416-db52-741064696ed1"
|
534 |
+
},
|
535 |
+
"execution_count": null,
|
536 |
+
"outputs": [
|
537 |
+
{
|
538 |
+
"output_type": "stream",
|
539 |
+
"name": "stdout",
|
540 |
+
"text": [
|
541 |
+
"Setting queue=True in a Colab notebook requires sharing enabled. Setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
|
542 |
+
"\n",
|
543 |
+
"Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
|
544 |
+
"Running on public URL: https://7b4faa7028f75417d8.gradio.live\n",
|
545 |
+
"\n",
|
546 |
+
"This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
|
547 |
+
]
|
548 |
+
},
|
549 |
+
{
|
550 |
+
"output_type": "display_data",
|
551 |
+
"data": {
|
552 |
+
"text/plain": [
|
553 |
+
"<IPython.core.display.HTML object>"
|
554 |
+
],
|
555 |
+
"text/html": [
|
556 |
+
"<div><iframe src=\"https://7b4faa7028f75417d8.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
|
557 |
+
]
|
558 |
+
},
|
559 |
+
"metadata": {}
|
560 |
+
},
|
561 |
+
{
|
562 |
+
"output_type": "execute_result",
|
563 |
+
"data": {
|
564 |
+
"text/plain": []
|
565 |
+
},
|
566 |
+
"metadata": {},
|
567 |
+
"execution_count": 9
|
568 |
+
}
|
569 |
+
]
|
570 |
+
},
|
571 |
+
{
|
572 |
+
"cell_type": "code",
|
573 |
+
"source": [
|
574 |
+
"/content/drive/MyDrive/Spark Project/Test_Video.mp4"
|
575 |
+
],
|
576 |
+
"metadata": {
|
577 |
+
"id": "Iqdjv7QowrZW"
|
578 |
+
},
|
579 |
+
"execution_count": null,
|
580 |
+
"outputs": []
|
581 |
+
},
|
582 |
+
{
|
583 |
+
"cell_type": "code",
|
584 |
+
"source": [
|
585 |
+
"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"
|
586 |
+
],
|
587 |
+
"metadata": {
|
588 |
+
"id": "v9-4b-gfrbwa"
|
589 |
+
},
|
590 |
+
"execution_count": null,
|
591 |
+
"outputs": []
|
592 |
+
},
|
593 |
+
{
|
594 |
+
"cell_type": "code",
|
595 |
+
"source": [
|
596 |
+
"/content/drive/MyDrive/Spark Project/Data (For YOLOv8 Training)/Option 3 - Roboflow (60 Images)/YOLOv8 Best Weights.pt"
|
597 |
+
],
|
598 |
+
"metadata": {
|
599 |
+
"id": "nPh9wSJvwvAt"
|
600 |
+
},
|
601 |
+
"execution_count": null,
|
602 |
+
"outputs": []
|
603 |
+
},
|
604 |
+
{
|
605 |
+
"cell_type": "code",
|
606 |
+
"source": [
|
607 |
+
"process_video(\"/content/drive/MyDrive/Spark Project/Test_Video.mp4\", \"/content/drive/MyDrive/Spark Project/Data (For YOLOv8 Training)/Option 3 - Roboflow (60 Images)/YOLOv8 Best Weights.pt\")"
|
608 |
+
],
|
609 |
+
"metadata": {
|
610 |
+
"id": "CvuZJI3-LGOU"
|
611 |
+
},
|
612 |
+
"execution_count": null,
|
613 |
+
"outputs": []
|
614 |
+
},
|
615 |
+
{
|
616 |
+
"cell_type": "code",
|
617 |
+
"source": [
|
618 |
+
"a = video_transcription(\"/content/drive/MyDrive/Spark Project/Test_Video.mp4\")\n",
|
619 |
+
"a"
|
620 |
+
],
|
621 |
+
"metadata": {
|
622 |
+
"id": "t7ZZYQp_tbN4"
|
623 |
+
},
|
624 |
+
"execution_count": null,
|
625 |
+
"outputs": []
|
626 |
+
},
|
627 |
+
{
|
628 |
+
"cell_type": "code",
|
629 |
+
"source": [
|
630 |
+
"!pip install ultralytics"
|
631 |
+
],
|
632 |
+
"metadata": {
|
633 |
+
"colab": {
|
634 |
+
"base_uri": "https://localhost:8080/"
|
635 |
+
},
|
636 |
+
"id": "mtv7izc3HQHP",
|
637 |
+
"outputId": "d7fed4ac-3d97-4580-bd21-f698f84f9615"
|
638 |
+
},
|
639 |
+
"execution_count": null,
|
640 |
+
"outputs": [
|
641 |
+
{
|
642 |
+
"output_type": "stream",
|
643 |
+
"name": "stdout",
|
644 |
+
"text": [
|
645 |
+
"Collecting ultralytics\n",
|
646 |
+
" Downloading ultralytics-8.0.214-py3-none-any.whl (645 kB)\n",
|
647 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m645.5/645.5 kB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
648 |
+
"\u001b[?25hRequirement already satisfied: matplotlib>=3.3.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (3.7.1)\n",
|
649 |
+
"Requirement already satisfied: numpy>=1.22.2 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (1.23.5)\n",
|
650 |
+
"Requirement already satisfied: opencv-python>=4.6.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (4.8.0.76)\n",
|
651 |
+
"Requirement already satisfied: pillow>=7.1.2 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (9.4.0)\n",
|
652 |
+
"Requirement already satisfied: pyyaml>=5.3.1 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (6.0.1)\n",
|
653 |
+
"Requirement already satisfied: requests>=2.23.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (2.31.0)\n",
|
654 |
+
"Requirement already satisfied: scipy>=1.4.1 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (1.11.3)\n",
|
655 |
+
"Requirement already satisfied: torch>=1.8.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (2.1.0+cu118)\n",
|
656 |
+
"Requirement already satisfied: torchvision>=0.9.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (0.16.0+cu118)\n",
|
657 |
+
"Requirement already satisfied: tqdm>=4.64.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (4.66.1)\n",
|
658 |
+
"Requirement already satisfied: pandas>=1.1.4 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (1.5.3)\n",
|
659 |
+
"Requirement already satisfied: seaborn>=0.11.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (0.12.2)\n",
|
660 |
+
"Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from ultralytics) (5.9.5)\n",
|
661 |
+
"Requirement already satisfied: py-cpuinfo in /usr/local/lib/python3.10/dist-packages (from ultralytics) (9.0.0)\n",
|
662 |
+
"Collecting thop>=0.1.1 (from ultralytics)\n",
|
663 |
+
" Downloading thop-0.1.1.post2209072238-py3-none-any.whl (15 kB)\n",
|
664 |
+
"Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics) (1.2.0)\n",
|
665 |
+
"Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics) (0.12.1)\n",
|
666 |
+
"Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics) (4.44.3)\n",
|
667 |
+
"Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics) (1.4.5)\n",
|
668 |
+
"Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics) (23.2)\n",
|
669 |
+
"Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics) (3.1.1)\n",
|
670 |
+
"Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics) (2.8.2)\n",
|
671 |
+
"Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas>=1.1.4->ultralytics) (2023.3.post1)\n",
|
672 |
+
"Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.23.0->ultralytics) (3.3.2)\n",
|
673 |
+
"Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.23.0->ultralytics) (3.4)\n",
|
674 |
+
"Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.23.0->ultralytics) (2.0.7)\n",
|
675 |
+
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.23.0->ultralytics) (2023.7.22)\n",
|
676 |
+
"Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch>=1.8.0->ultralytics) (3.13.1)\n",
|
677 |
+
"Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch>=1.8.0->ultralytics) (4.5.0)\n",
|
678 |
+
"Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch>=1.8.0->ultralytics) (1.12)\n",
|
679 |
+
"Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch>=1.8.0->ultralytics) (3.2.1)\n",
|
680 |
+
"Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=1.8.0->ultralytics) (3.1.2)\n",
|
681 |
+
"Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch>=1.8.0->ultralytics) (2023.6.0)\n",
|
682 |
+
"Requirement already satisfied: triton==2.1.0 in /usr/local/lib/python3.10/dist-packages (from torch>=1.8.0->ultralytics) (2.1.0)\n",
|
683 |
+
"Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib>=3.3.0->ultralytics) (1.16.0)\n",
|
684 |
+
"Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch>=1.8.0->ultralytics) (2.1.3)\n",
|
685 |
+
"Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.8.0->ultralytics) (1.3.0)\n",
|
686 |
+
"Installing collected packages: thop, ultralytics\n",
|
687 |
+
"Successfully installed thop-0.1.1.post2209072238 ultralytics-8.0.214\n"
|
688 |
+
]
|
689 |
+
}
|
690 |
+
]
|
691 |
+
}
|
692 |
+
]
|
693 |
+
}
|
Dockerfile
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
FROM python:3.10.9
|
2 |
+
|
3 |
+
WORKDIR /workspace
|
4 |
+
|
5 |
+
RUN cd /workspace
|
6 |
+
RUN apt-get update -y
|
7 |
+
RUN apt install libgl1-mesa-glx -y
|
8 |
+
RUN apt-get install 'ffmpeg'\
|
9 |
+
'libsm6'\
|
10 |
+
'libxext6' -y
|
11 |
+
|
12 |
+
COPY . .
|
13 |
+
|
14 |
+
|
15 |
+
# ADD requirements.txt main.py /workspace/
|
16 |
+
|
17 |
+
RUN pip install -r requirements.txt
|
18 |
+
RUN apt-get update && apt-get install libglu1
|
19 |
+
|
20 |
+
CMD ["python", "app.py"]
|
README.md
CHANGED
@@ -1,12 +1,6 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
-
emoji: ⚡
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: yellow
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 4.10.0
|
8 |
app_file: app.py
|
9 |
-
|
|
|
10 |
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
---
|
2 |
+
title: spark-ds549bodycam-deployment
|
|
|
|
|
|
|
|
|
|
|
3 |
app_file: app.py
|
4 |
+
sdk: gradio
|
5 |
+
sdk_version: 4.7.1
|
6 |
---
|
|
|
|
Spark_Deployment.ipynb
ADDED
@@ -0,0 +1,703 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 1,
|
6 |
+
"metadata": {
|
7 |
+
"colab": {
|
8 |
+
"base_uri": "https://localhost:8080/"
|
9 |
+
},
|
10 |
+
"id": "mM4DvqTJGzwt",
|
11 |
+
"outputId": "f9e04f60-2edd-4c55-962b-b18f5dd37001"
|
12 |
+
},
|
13 |
+
"outputs": [
|
14 |
+
{
|
15 |
+
"name": "stdout",
|
16 |
+
"output_type": "stream",
|
17 |
+
"text": [
|
18 |
+
"Mounted at /content/drive\n"
|
19 |
+
]
|
20 |
+
}
|
21 |
+
],
|
22 |
+
"source": [
|
23 |
+
"from google.colab import drive\n",
|
24 |
+
"drive.mount('/content/drive')"
|
25 |
+
]
|
26 |
+
},
|
27 |
+
{
|
28 |
+
"cell_type": "code",
|
29 |
+
"execution_count": null,
|
30 |
+
"metadata": {
|
31 |
+
"id": "rQkjmqCqRlVs"
|
32 |
+
},
|
33 |
+
"outputs": [],
|
34 |
+
"source": [
|
35 |
+
"!sudo apt update && sudo apt install ffmpeg"
|
36 |
+
]
|
37 |
+
},
|
38 |
+
{
|
39 |
+
"cell_type": "code",
|
40 |
+
"execution_count": 30,
|
41 |
+
"metadata": {
|
42 |
+
"colab": {
|
43 |
+
"base_uri": "https://localhost:8080/"
|
44 |
+
},
|
45 |
+
"id": "6qpSdD_WSiC3",
|
46 |
+
"outputId": "bd8400dc-244a-4529-902e-8838775f431c"
|
47 |
+
},
|
48 |
+
"outputs": [
|
49 |
+
{
|
50 |
+
"name": "stdout",
|
51 |
+
"output_type": "stream",
|
52 |
+
"text": [
|
53 |
+
"Cloning into 'ml-nlgma-body-cam'...\n",
|
54 |
+
"remote: Enumerating objects: 395, done.\u001b[K\n",
|
55 |
+
"remote: Counting objects: 100% (292/292), done.\u001b[K\n",
|
56 |
+
"remote: Compressing objects: 100% (239/239), done.\u001b[K\n",
|
57 |
+
"remote: Total 395 (delta 77), reused 224 (delta 47), pack-reused 103\u001b[K\n",
|
58 |
+
"Receiving objects: 100% (395/395), 9.15 MiB | 6.64 MiB/s, done.\n",
|
59 |
+
"Resolving deltas: 100% (117/117), done.\n"
|
60 |
+
]
|
61 |
+
}
|
62 |
+
],
|
63 |
+
"source": [
|
64 |
+
"!git clone https://github.com/k-sashank/ml-nlgma-body-cam.git"
|
65 |
+
]
|
66 |
+
},
|
67 |
+
{
|
68 |
+
"cell_type": "code",
|
69 |
+
"execution_count": 34,
|
70 |
+
"metadata": {
|
71 |
+
"colab": {
|
72 |
+
"base_uri": "https://localhost:8080/"
|
73 |
+
},
|
74 |
+
"id": "S1z-orpESvt7",
|
75 |
+
"outputId": "5a2fdae3-455a-4234-e4cb-da7578ca8a6c"
|
76 |
+
},
|
77 |
+
"outputs": [
|
78 |
+
{
|
79 |
+
"name": "stdout",
|
80 |
+
"output_type": "stream",
|
81 |
+
"text": [
|
82 |
+
"/content/ml-nlgma-body-cam/deployment\n"
|
83 |
+
]
|
84 |
+
}
|
85 |
+
],
|
86 |
+
"source": [
|
87 |
+
"cd ml-nlgma-body-cam/deployment/"
|
88 |
+
]
|
89 |
+
},
|
90 |
+
{
|
91 |
+
"cell_type": "code",
|
92 |
+
"execution_count": 29,
|
93 |
+
"metadata": {
|
94 |
+
"colab": {
|
95 |
+
"base_uri": "https://localhost:8080/"
|
96 |
+
},
|
97 |
+
"id": "byk_2SlPsWAZ",
|
98 |
+
"outputId": "6286e21b-41ce-40cc-861b-022f6275f4a5"
|
99 |
+
},
|
100 |
+
"outputs": [
|
101 |
+
{
|
102 |
+
"name": "stdout",
|
103 |
+
"output_type": "stream",
|
104 |
+
"text": [
|
105 |
+
"Requirement already satisfied: opencv-python in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 1)) (4.8.0.76)\n",
|
106 |
+
"Requirement already satisfied: ultralytics in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 2)) (8.0.219)\n",
|
107 |
+
"Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 3)) (2.1.0+cu118)\n",
|
108 |
+
"Requirement already satisfied: transformers in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 4)) (4.35.2)\n",
|
109 |
+
"Requirement already satisfied: Cython in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 5)) (3.0.5)\n",
|
110 |
+
"Requirement already satisfied: wget in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 6)) (3.2)\n",
|
111 |
+
"Requirement already satisfied: accelerate in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 7)) (0.24.1)\n",
|
112 |
+
"Requirement already satisfied: openai in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 8)) (1.3.5)\n",
|
113 |
+
"Requirement already satisfied: openai-whisper in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 9)) (20231117)\n",
|
114 |
+
"Requirement already satisfied: numpy>=1.21.2 in /usr/local/lib/python3.10/dist-packages (from opencv-python->-r requirements.txt (line 1)) (1.23.5)\n",
|
115 |
+
"Requirement already satisfied: matplotlib>=3.3.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (3.7.1)\n",
|
116 |
+
"Requirement already satisfied: pillow>=7.1.2 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (9.4.0)\n",
|
117 |
+
"Requirement already satisfied: pyyaml>=5.3.1 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (6.0.1)\n",
|
118 |
+
"Requirement already satisfied: requests>=2.23.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (2.31.0)\n",
|
119 |
+
"Requirement already satisfied: scipy>=1.4.1 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (1.11.3)\n",
|
120 |
+
"Requirement already satisfied: torchvision>=0.9.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (0.16.0+cu118)\n",
|
121 |
+
"Requirement already satisfied: tqdm>=4.64.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (4.66.1)\n",
|
122 |
+
"Requirement already satisfied: pandas>=1.1.4 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (1.5.3)\n",
|
123 |
+
"Requirement already satisfied: seaborn>=0.11.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (0.12.2)\n",
|
124 |
+
"Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (5.9.5)\n",
|
125 |
+
"Requirement already satisfied: py-cpuinfo in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (9.0.0)\n",
|
126 |
+
"Requirement already satisfied: thop>=0.1.1 in /usr/local/lib/python3.10/dist-packages (from ultralytics->-r requirements.txt (line 2)) (0.1.1.post2209072238)\n",
|
127 |
+
"Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch->-r requirements.txt (line 3)) (3.13.1)\n",
|
128 |
+
"Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch->-r requirements.txt (line 3)) (4.8.0)\n",
|
129 |
+
"Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch->-r requirements.txt (line 3)) (1.12)\n",
|
130 |
+
"Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch->-r requirements.txt (line 3)) (3.2.1)\n",
|
131 |
+
"Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch->-r requirements.txt (line 3)) (3.1.2)\n",
|
132 |
+
"Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch->-r requirements.txt (line 3)) (2023.6.0)\n",
|
133 |
+
"Requirement already satisfied: triton==2.1.0 in /usr/local/lib/python3.10/dist-packages (from torch->-r requirements.txt (line 3)) (2.1.0)\n",
|
134 |
+
"Requirement already satisfied: huggingface-hub<1.0,>=0.16.4 in /usr/local/lib/python3.10/dist-packages (from transformers->-r requirements.txt (line 4)) (0.19.4)\n",
|
135 |
+
"Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers->-r requirements.txt (line 4)) (23.2)\n",
|
136 |
+
"Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers->-r requirements.txt (line 4)) (2023.6.3)\n",
|
137 |
+
"Requirement already satisfied: tokenizers<0.19,>=0.14 in /usr/local/lib/python3.10/dist-packages (from transformers->-r requirements.txt (line 4)) (0.15.0)\n",
|
138 |
+
"Requirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.10/dist-packages (from transformers->-r requirements.txt (line 4)) (0.4.0)\n",
|
139 |
+
"Requirement already satisfied: anyio<4,>=3.5.0 in /usr/local/lib/python3.10/dist-packages (from openai->-r requirements.txt (line 8)) (3.7.1)\n",
|
140 |
+
"Requirement already satisfied: distro<2,>=1.7.0 in /usr/lib/python3/dist-packages (from openai->-r requirements.txt (line 8)) (1.7.0)\n",
|
141 |
+
"Requirement already satisfied: httpx<1,>=0.23.0 in /usr/local/lib/python3.10/dist-packages (from openai->-r requirements.txt (line 8)) (0.25.2)\n",
|
142 |
+
"Requirement already satisfied: pydantic<3,>=1.9.0 in /usr/local/lib/python3.10/dist-packages (from openai->-r requirements.txt (line 8)) (2.5.2)\n",
|
143 |
+
"Requirement already satisfied: numba in /usr/local/lib/python3.10/dist-packages (from openai-whisper->-r requirements.txt (line 9)) (0.58.1)\n",
|
144 |
+
"Requirement already satisfied: more-itertools in /usr/local/lib/python3.10/dist-packages (from openai-whisper->-r requirements.txt (line 9)) (10.1.0)\n",
|
145 |
+
"Requirement already satisfied: tiktoken in /usr/local/lib/python3.10/dist-packages (from openai-whisper->-r requirements.txt (line 9)) (0.5.1)\n",
|
146 |
+
"Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.10/dist-packages (from anyio<4,>=3.5.0->openai->-r requirements.txt (line 8)) (3.4)\n",
|
147 |
+
"Requirement already satisfied: sniffio>=1.1 in /usr/local/lib/python3.10/dist-packages (from anyio<4,>=3.5.0->openai->-r requirements.txt (line 8)) (1.3.0)\n",
|
148 |
+
"Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio<4,>=3.5.0->openai->-r requirements.txt (line 8)) (1.1.3)\n",
|
149 |
+
"Requirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from httpx<1,>=0.23.0->openai->-r requirements.txt (line 8)) (2023.7.22)\n",
|
150 |
+
"Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.10/dist-packages (from httpx<1,>=0.23.0->openai->-r requirements.txt (line 8)) (1.0.2)\n",
|
151 |
+
"Requirement already satisfied: h11<0.15,>=0.13 in /usr/local/lib/python3.10/dist-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai->-r requirements.txt (line 8)) (0.14.0)\n",
|
152 |
+
"Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics->-r requirements.txt (line 2)) (1.2.0)\n",
|
153 |
+
"Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics->-r requirements.txt (line 2)) (0.12.1)\n",
|
154 |
+
"Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics->-r requirements.txt (line 2)) (4.44.3)\n",
|
155 |
+
"Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics->-r requirements.txt (line 2)) (1.4.5)\n",
|
156 |
+
"Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics->-r requirements.txt (line 2)) (3.1.1)\n",
|
157 |
+
"Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics->-r requirements.txt (line 2)) (2.8.2)\n",
|
158 |
+
"Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas>=1.1.4->ultralytics->-r requirements.txt (line 2)) (2023.3.post1)\n",
|
159 |
+
"Requirement already satisfied: annotated-types>=0.4.0 in /usr/local/lib/python3.10/dist-packages (from pydantic<3,>=1.9.0->openai->-r requirements.txt (line 8)) (0.6.0)\n",
|
160 |
+
"Requirement already satisfied: pydantic-core==2.14.5 in /usr/local/lib/python3.10/dist-packages (from pydantic<3,>=1.9.0->openai->-r requirements.txt (line 8)) (2.14.5)\n",
|
161 |
+
"Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.23.0->ultralytics->-r requirements.txt (line 2)) (3.3.2)\n",
|
162 |
+
"Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.23.0->ultralytics->-r requirements.txt (line 2)) (2.0.7)\n",
|
163 |
+
"Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch->-r requirements.txt (line 3)) (2.1.3)\n",
|
164 |
+
"Requirement already satisfied: llvmlite<0.42,>=0.41.0dev0 in /usr/local/lib/python3.10/dist-packages (from numba->openai-whisper->-r requirements.txt (line 9)) (0.41.1)\n",
|
165 |
+
"Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch->-r requirements.txt (line 3)) (1.3.0)\n",
|
166 |
+
"Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib>=3.3.0->ultralytics->-r requirements.txt (line 2)) (1.16.0)\n"
|
167 |
+
]
|
168 |
+
}
|
169 |
+
],
|
170 |
+
"source": [
|
171 |
+
"!pip install -r requirements.txt"
|
172 |
+
]
|
173 |
+
},
|
174 |
+
{
|
175 |
+
"cell_type": "code",
|
176 |
+
"execution_count": 8,
|
177 |
+
"metadata": {
|
178 |
+
"id": "3ZrCX53_ssHy"
|
179 |
+
},
|
180 |
+
"outputs": [],
|
181 |
+
"source": [
|
182 |
+
"import whisper\n",
|
183 |
+
"import cv2\n",
|
184 |
+
"import os\n",
|
185 |
+
"import urllib.request\n",
|
186 |
+
"from PIL import Image\n",
|
187 |
+
"from ultralytics import YOLO\n",
|
188 |
+
"import torch\n",
|
189 |
+
"import matplotlib.pyplot as plt\n",
|
190 |
+
"from tqdm import tqdm\n",
|
191 |
+
"from transformers import pipeline\n",
|
192 |
+
"import moviepy.editor as mp\n",
|
193 |
+
"import json\n",
|
194 |
+
"import re\n",
|
195 |
+
"import gradio as gr\n",
|
196 |
+
"from openai import OpenAI"
|
197 |
+
]
|
198 |
+
},
|
199 |
+
{
|
200 |
+
"cell_type": "markdown",
|
201 |
+
"metadata": {
|
202 |
+
"id": "lmWMJ_FeJDyD"
|
203 |
+
},
|
204 |
+
"source": [
|
205 |
+
"# Full"
|
206 |
+
]
|
207 |
+
},
|
208 |
+
{
|
209 |
+
"cell_type": "code",
|
210 |
+
"execution_count": 16,
|
211 |
+
"metadata": {
|
212 |
+
"id": "YMDeEOwLtGYN"
|
213 |
+
},
|
214 |
+
"outputs": [],
|
215 |
+
"source": [
|
216 |
+
"def video_transcription(video_path):\n",
|
217 |
+
" try:\n",
|
218 |
+
" model = whisper.load_model('large-v3')\n",
|
219 |
+
" transcript = model.transcribe(video_path, verbose = True)\n",
|
220 |
+
"\n",
|
221 |
+
" # JSON Dump (Find a way to not create a file and just dump into a variable or something)\n",
|
222 |
+
" #json_file_path = video_path.split('/')[-1][:-4]+ \".json\"\n",
|
223 |
+
" #with open(json_file_path, 'w') as json_file:\n",
|
224 |
+
" #json.dump(transcript, json_file, indent = 2)\n",
|
225 |
+
" #json_file_path = video_path.split('/')[-1][:-4]+ \".json\"\n",
|
226 |
+
" #with open(json_file_path, 'w') as json_file:\n",
|
227 |
+
" return json.dump(transcript)\n",
|
228 |
+
"\n",
|
229 |
+
" except Exception as e:\n",
|
230 |
+
" return e\n",
|
231 |
+
"\n",
|
232 |
+
"def action_detection(transcript, openai_key):\n",
|
233 |
+
" try:\n",
|
234 |
+
" # JSON Dump (Find a way to not create a file and just dump into a variable or something)\n",
|
235 |
+
" #with open(json_path, 'r') as f:\n",
|
236 |
+
" # transcript = json.load(f)\n",
|
237 |
+
" transcript_string = ''\n",
|
238 |
+
" for segments in transcript['segments']:\n",
|
239 |
+
" transcript_string+=str(segments['id'])+str(segments['text']+'\\n')\n",
|
240 |
+
"\n",
|
241 |
+
" client = OpenAI(api_key = openai_key)\n",
|
242 |
+
"\n",
|
243 |
+
" completion = client.chat.completions.create(\n",
|
244 |
+
" model=\"gpt-3.5-turbo-1106\",\n",
|
245 |
+
" messages=[\n",
|
246 |
+
" {\"role\": \"system\", \"content\": f\"Given this {transcript_string} You are an AI system specialized in detecting planning issues, critiquing plans, and analyzing conversations between people regarding how to disperse. Additionally, identify any instances suggesting 1st Amendment violations or officers expressing the belief that this protest was anti-police. Finally, flag any aggressive comments found in the audio transcript.\"},\n",
|
247 |
+
" {\"role\": \"user\", \"content\":\"Give responce like this following examples: Sentence: '18: What do you got?' Explanation: This sentence may indicate confusion or a need for clarification, as the speaker is asking for information. It could potentially be a planning issue if the speaker is seeking information to execute a specific task.\"}\n",
|
248 |
+
" ]\n",
|
249 |
+
" )\n",
|
250 |
+
"\n",
|
251 |
+
" output = completion.choices[0].message.content\n",
|
252 |
+
"\n",
|
253 |
+
" paragraphs = re.split(r'\\n\\n', output)\n",
|
254 |
+
"\n",
|
255 |
+
" sentences = []\n",
|
256 |
+
" explanations = []\n",
|
257 |
+
"\n",
|
258 |
+
" for paragraph in paragraphs:\n",
|
259 |
+
" sentence_match = re.search(r\"Sentence: '(.+)'\", paragraph)\n",
|
260 |
+
" explanation_match = re.search(r\"Explanation: (.+)\", paragraph)\n",
|
261 |
+
"\n",
|
262 |
+
" if sentence_match and explanation_match:\n",
|
263 |
+
" sentences.append(sentence_match.group(1).split(': ')[-1])\n",
|
264 |
+
" explanations.append(explanation_match.group(1))\n",
|
265 |
+
"\n",
|
266 |
+
" #for i in range(len(sentences)):\n",
|
267 |
+
" # print(f\"Sentence: '{sentences[i]}'\")\n",
|
268 |
+
" # print(f\"Explanation: {explanations[i]}\\n\")\n",
|
269 |
+
"\n",
|
270 |
+
" for sentence_to_search in sentences:\n",
|
271 |
+
" pattern = re.compile(re.escape(sentence_to_search), re.IGNORECASE)\n",
|
272 |
+
"\n",
|
273 |
+
" matching_entries = [entry for entry in transcript['segments'] if re.search(pattern, entry['text'])]\n",
|
274 |
+
"\n",
|
275 |
+
" sent_with_time = []\n",
|
276 |
+
" if matching_entries:\n",
|
277 |
+
" for entry in matching_entries:\n",
|
278 |
+
" sent_with_time.append(sentence_to_search + ' Start Time: ', entry['start'] + ' End Time: ', entry['end'])\n",
|
279 |
+
"\n",
|
280 |
+
" return sent_with_time\n",
|
281 |
+
"\n",
|
282 |
+
" except Exception as e:\n",
|
283 |
+
" return e\n",
|
284 |
+
"\n",
|
285 |
+
"def process_video(video_path, weights):\n",
|
286 |
+
" try:\n",
|
287 |
+
" # This code cell detects batons in the video\n",
|
288 |
+
" current_frame = 0\n",
|
289 |
+
" model = YOLO(weights)\n",
|
290 |
+
" cap = cv2.VideoCapture(video_path)\n",
|
291 |
+
" fps = int(cap.get(cv2.CAP_PROP_FPS))\n",
|
292 |
+
" conseq_frames = 0\n",
|
293 |
+
" start_time = \"\"\n",
|
294 |
+
" end_time = \"\"\n",
|
295 |
+
" res = []\n",
|
296 |
+
"\n",
|
297 |
+
" while True:\n",
|
298 |
+
" ret, frame = cap.read()\n",
|
299 |
+
" if not ret:\n",
|
300 |
+
" break\n",
|
301 |
+
"\n",
|
302 |
+
" # Detecting baton on one frame per second\n",
|
303 |
+
" if current_frame % fps == 0:\n",
|
304 |
+
" currect_sec = current_frame/fps\n",
|
305 |
+
"\n",
|
306 |
+
" # Model prediction on current frame\n",
|
307 |
+
" results = model(frame, verbose = False)\n",
|
308 |
+
" count = 0\n",
|
309 |
+
" classes = results[0].boxes.data\n",
|
310 |
+
"\n",
|
311 |
+
" # Formatting the time for printing\n",
|
312 |
+
" hours, remainder = divmod(currect_sec, 3600)\n",
|
313 |
+
" minutes, seconds = divmod(remainder, 60)\n",
|
314 |
+
" hours = str(int(hours)).zfill(2)\n",
|
315 |
+
" minutes = str(int(minutes)).zfill(2)\n",
|
316 |
+
" seconds = str(int(seconds)).zfill(2)\n",
|
317 |
+
"\n",
|
318 |
+
" for i in classes:\n",
|
319 |
+
"\n",
|
320 |
+
" # Checking if baton is detected (i.e. if the class corresponding to baton is 1 or not)\n",
|
321 |
+
" if float(i[5]) == 1:\n",
|
322 |
+
" count+=1\n",
|
323 |
+
"\n",
|
324 |
+
" # Marking the start_time if this is the first consecutive frame a baton is detected in\n",
|
325 |
+
" if count >= 1:\n",
|
326 |
+
" conseq_frames+=1\n",
|
327 |
+
" if conseq_frames == 1:\n",
|
328 |
+
" start_time = hours + \":\" + minutes + \":\" + seconds\n",
|
329 |
+
"\n",
|
330 |
+
" # Marking the end time if after one or multiple consecutive frames of detection, a baton is not detected\n",
|
331 |
+
" else:\n",
|
332 |
+
" if conseq_frames > 0:\n",
|
333 |
+
" conseq_frames = 0\n",
|
334 |
+
" end_time = hours + \":\" + minutes + \":\" + seconds\n",
|
335 |
+
"\n",
|
336 |
+
" # Printing time intervals in which baton was detected\n",
|
337 |
+
" res.append(start_time + \" to \" + end_time)\n",
|
338 |
+
" start_time = \"\"\n",
|
339 |
+
" end_time = \"\"\n",
|
340 |
+
"\n",
|
341 |
+
" current_frame += 1\n",
|
342 |
+
" cap.release()\n",
|
343 |
+
"\n",
|
344 |
+
" return \"\\n\".join(res)\n",
|
345 |
+
"\n",
|
346 |
+
" except Exception as e:\n",
|
347 |
+
"\n",
|
348 |
+
" return e\n",
|
349 |
+
"\n",
|
350 |
+
"def all_funcs(openai_key,video_path, yolo_weights):\n",
|
351 |
+
" transcript = video_transcription(video_path)\n",
|
352 |
+
" sentences = action_detection(json.loads(transcript), openai_key)\n",
|
353 |
+
" batons = process_video(video_path, yolo_weights)\n",
|
354 |
+
"\n",
|
355 |
+
" return sentences, batons"
|
356 |
+
]
|
357 |
+
},
|
358 |
+
{
|
359 |
+
"cell_type": "code",
|
360 |
+
"execution_count": 24,
|
361 |
+
"metadata": {
|
362 |
+
"colab": {
|
363 |
+
"base_uri": "https://localhost:8080/",
|
364 |
+
"height": 645
|
365 |
+
},
|
366 |
+
"id": "ZbZKUNl3Mttf",
|
367 |
+
"outputId": "bb7e6e43-1bce-4b35-da0e-49ef39093d16"
|
368 |
+
},
|
369 |
+
"outputs": [
|
370 |
+
{
|
371 |
+
"name": "stdout",
|
372 |
+
"output_type": "stream",
|
373 |
+
"text": [
|
374 |
+
"Setting queue=True in a Colab notebook requires sharing enabled. Setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
|
375 |
+
"\n",
|
376 |
+
"Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
|
377 |
+
"Running on public URL: https://868bba52379b375588.gradio.live\n",
|
378 |
+
"\n",
|
379 |
+
"This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
|
380 |
+
]
|
381 |
+
},
|
382 |
+
{
|
383 |
+
"data": {
|
384 |
+
"text/html": [
|
385 |
+
"<div><iframe src=\"https://868bba52379b375588.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
|
386 |
+
],
|
387 |
+
"text/plain": [
|
388 |
+
"<IPython.core.display.HTML object>"
|
389 |
+
]
|
390 |
+
},
|
391 |
+
"metadata": {},
|
392 |
+
"output_type": "display_data"
|
393 |
+
},
|
394 |
+
{
|
395 |
+
"data": {
|
396 |
+
"text/plain": []
|
397 |
+
},
|
398 |
+
"execution_count": 24,
|
399 |
+
"metadata": {},
|
400 |
+
"output_type": "execute_result"
|
401 |
+
}
|
402 |
+
],
|
403 |
+
"source": [
|
404 |
+
"btn = gr.Interface(\n",
|
405 |
+
" fn=all_funcs,\n",
|
406 |
+
" inputs=[\"text\",gr.Files(label=\"Select Video File\"), gr.Files(label=\"Select YOLOv8 Weights File\")],\n",
|
407 |
+
" outputs=[gr.Textbox(label=\"Audio analysis time stamps\",lines=20), gr.Textbox(label=\"Baton detection timestamps\",lines=20)]\n",
|
408 |
+
")\n",
|
409 |
+
"\n",
|
410 |
+
"btn.launch()\n"
|
411 |
+
]
|
412 |
+
},
|
413 |
+
{
|
414 |
+
"cell_type": "markdown",
|
415 |
+
"metadata": {
|
416 |
+
"id": "NMF48OxVJHLp"
|
417 |
+
},
|
418 |
+
"source": [
|
419 |
+
"# Baton Detection"
|
420 |
+
]
|
421 |
+
},
|
422 |
+
{
|
423 |
+
"cell_type": "code",
|
424 |
+
"execution_count": null,
|
425 |
+
"metadata": {
|
426 |
+
"id": "8VSlkVeNJQo4"
|
427 |
+
},
|
428 |
+
"outputs": [],
|
429 |
+
"source": [
|
430 |
+
"def process_video(video_path, weights):\n",
|
431 |
+
" try:\n",
|
432 |
+
" # This code cell detects batons in the video\n",
|
433 |
+
" current_frame = 0\n",
|
434 |
+
" model = YOLO(weights)\n",
|
435 |
+
" cap = cv2.VideoCapture(video_path)\n",
|
436 |
+
" fps = int(cap.get(cv2.CAP_PROP_FPS))\n",
|
437 |
+
" conseq_frames = 0\n",
|
438 |
+
" start_time = \"\"\n",
|
439 |
+
" end_time = \"\"\n",
|
440 |
+
" res = []\n",
|
441 |
+
"\n",
|
442 |
+
" while True:\n",
|
443 |
+
" ret, frame = cap.read()\n",
|
444 |
+
" if not ret:\n",
|
445 |
+
" break\n",
|
446 |
+
"\n",
|
447 |
+
" # Detecting baton on one frame per second\n",
|
448 |
+
" if current_frame % fps == 0:\n",
|
449 |
+
" currect_sec = current_frame/fps\n",
|
450 |
+
"\n",
|
451 |
+
" # Model prediction on current frame\n",
|
452 |
+
" results = model(frame, verbose = False)\n",
|
453 |
+
" count = 0\n",
|
454 |
+
" classes = results[0].boxes.data\n",
|
455 |
+
"\n",
|
456 |
+
" # Formatting the time for printing\n",
|
457 |
+
" hours, remainder = divmod(currect_sec, 3600)\n",
|
458 |
+
" minutes, seconds = divmod(remainder, 60)\n",
|
459 |
+
" hours = str(int(hours)).zfill(2)\n",
|
460 |
+
" minutes = str(int(minutes)).zfill(2)\n",
|
461 |
+
" seconds = str(int(seconds)).zfill(2)\n",
|
462 |
+
"\n",
|
463 |
+
" for i in classes:\n",
|
464 |
+
"\n",
|
465 |
+
" # Checking if baton is detected (i.e. if the class corresponding to baton is 1 or not)\n",
|
466 |
+
" if float(i[5]) == 1:\n",
|
467 |
+
" count+=1\n",
|
468 |
+
"\n",
|
469 |
+
" # Marking the start_time if this is the first consecutive frame a baton is detected in\n",
|
470 |
+
" if count >= 1:\n",
|
471 |
+
" conseq_frames+=1\n",
|
472 |
+
" if conseq_frames == 1:\n",
|
473 |
+
" start_time = hours + \":\" + minutes + \":\" + seconds\n",
|
474 |
+
"\n",
|
475 |
+
" # Marking the end time if after one or multiple consecutive frames of detection, a baton is not detected\n",
|
476 |
+
" else:\n",
|
477 |
+
" if conseq_frames > 0:\n",
|
478 |
+
" conseq_frames = 0\n",
|
479 |
+
" end_time = hours + \":\" + minutes + \":\" + seconds\n",
|
480 |
+
"\n",
|
481 |
+
" # Printing time intervals in which baton was detected\n",
|
482 |
+
" res.append(start_time + \" to \" + end_time)\n",
|
483 |
+
" start_time = \"\"\n",
|
484 |
+
" end_time = \"\"\n",
|
485 |
+
"\n",
|
486 |
+
" current_frame += 1\n",
|
487 |
+
" cap.release()\n",
|
488 |
+
"\n",
|
489 |
+
" return \"\\n\".join(res)\n",
|
490 |
+
"\n",
|
491 |
+
" except Exception as e:\n",
|
492 |
+
"\n",
|
493 |
+
" return e"
|
494 |
+
]
|
495 |
+
},
|
496 |
+
{
|
497 |
+
"cell_type": "code",
|
498 |
+
"execution_count": null,
|
499 |
+
"metadata": {
|
500 |
+
"colab": {
|
501 |
+
"base_uri": "https://localhost:8080/",
|
502 |
+
"height": 626
|
503 |
+
},
|
504 |
+
"id": "uSBK_3VBJQmC",
|
505 |
+
"outputId": "30c43f6f-72c7-4416-db52-741064696ed1"
|
506 |
+
},
|
507 |
+
"outputs": [
|
508 |
+
{
|
509 |
+
"name": "stdout",
|
510 |
+
"output_type": "stream",
|
511 |
+
"text": [
|
512 |
+
"Setting queue=True in a Colab notebook requires sharing enabled. Setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
|
513 |
+
"\n",
|
514 |
+
"Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
|
515 |
+
"Running on public URL: https://7b4faa7028f75417d8.gradio.live\n",
|
516 |
+
"\n",
|
517 |
+
"This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
|
518 |
+
]
|
519 |
+
},
|
520 |
+
{
|
521 |
+
"data": {
|
522 |
+
"text/html": [
|
523 |
+
"<div><iframe src=\"https://7b4faa7028f75417d8.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
|
524 |
+
],
|
525 |
+
"text/plain": [
|
526 |
+
"<IPython.core.display.HTML object>"
|
527 |
+
]
|
528 |
+
},
|
529 |
+
"metadata": {},
|
530 |
+
"output_type": "display_data"
|
531 |
+
},
|
532 |
+
{
|
533 |
+
"data": {
|
534 |
+
"text/plain": []
|
535 |
+
},
|
536 |
+
"execution_count": 9,
|
537 |
+
"metadata": {},
|
538 |
+
"output_type": "execute_result"
|
539 |
+
}
|
540 |
+
],
|
541 |
+
"source": [
|
542 |
+
"with gr.Blocks() as demo:\n",
|
543 |
+
"\n",
|
544 |
+
" video_path = gr.Textbox(label = \"Enter Path to Video\")\n",
|
545 |
+
" #openai_keys = gr.Textbox(label = \"Enter your OpenAI Key\")\n",
|
546 |
+
" weights = gr.Textbox(label = \"Enter Path to YOLOv8 Weights\")\n",
|
547 |
+
" #sentences = gr.Textbox(label = \"Sentences Detected\")\n",
|
548 |
+
" batons = gr.Textbox(label = \"Batons Detected\")\n",
|
549 |
+
" btn = gr.Button(value = \"Process Video\")\n",
|
550 |
+
" btn.click(process_video, inputs = [video_path, weights], outputs = batons)\n",
|
551 |
+
"\n",
|
552 |
+
"demo.launch()"
|
553 |
+
]
|
554 |
+
},
|
555 |
+
{
|
556 |
+
"cell_type": "code",
|
557 |
+
"execution_count": null,
|
558 |
+
"metadata": {
|
559 |
+
"id": "Iqdjv7QowrZW"
|
560 |
+
},
|
561 |
+
"outputs": [],
|
562 |
+
"source": [
|
563 |
+
"/content/drive/MyDrive/Spark Project/Test_Video.mp4"
|
564 |
+
]
|
565 |
+
},
|
566 |
+
{
|
567 |
+
"cell_type": "code",
|
568 |
+
"execution_count": null,
|
569 |
+
"metadata": {
|
570 |
+
"id": "v9-4b-gfrbwa"
|
571 |
+
},
|
572 |
+
"outputs": [],
|
573 |
+
"source": [
|
574 |
+
<OPENAI_API_KEY_REMOVED -- this key was committed in plain text and must be revoked>
|
575 |
+
]
|
576 |
+
},
|
577 |
+
{
|
578 |
+
"cell_type": "code",
|
579 |
+
"execution_count": null,
|
580 |
+
"metadata": {
|
581 |
+
"id": "nPh9wSJvwvAt"
|
582 |
+
},
|
583 |
+
"outputs": [],
|
584 |
+
"source": [
|
585 |
+
"/content/drive/MyDrive/Spark Project/Data (For YOLOv8 Training)/Option 3 - Roboflow (60 Images)/YOLOv8 Best Weights.pt"
|
586 |
+
]
|
587 |
+
},
|
588 |
+
{
|
589 |
+
"cell_type": "code",
|
590 |
+
"execution_count": null,
|
591 |
+
"metadata": {
|
592 |
+
"id": "CvuZJI3-LGOU"
|
593 |
+
},
|
594 |
+
"outputs": [],
|
595 |
+
"source": [
|
596 |
+
"process_video(\"/content/drive/MyDrive/Spark Project/Test_Video.mp4\", \"/content/drive/MyDrive/Spark Project/Data (For YOLOv8 Training)/Option 3 - Roboflow (60 Images)/YOLOv8 Best Weights.pt\")"
|
597 |
+
]
|
598 |
+
},
|
599 |
+
{
|
600 |
+
"cell_type": "code",
|
601 |
+
"execution_count": null,
|
602 |
+
"metadata": {
|
603 |
+
"id": "t7ZZYQp_tbN4"
|
604 |
+
},
|
605 |
+
"outputs": [],
|
606 |
+
"source": [
|
607 |
+
"a = video_transcription(\"/content/drive/MyDrive/Spark Project/Test_Video.mp4\")\n",
|
608 |
+
"a"
|
609 |
+
]
|
610 |
+
},
|
611 |
+
{
|
612 |
+
"cell_type": "code",
|
613 |
+
"execution_count": null,
|
614 |
+
"metadata": {
|
615 |
+
"colab": {
|
616 |
+
"base_uri": "https://localhost:8080/"
|
617 |
+
},
|
618 |
+
"id": "mtv7izc3HQHP",
|
619 |
+
"outputId": "d7fed4ac-3d97-4580-bd21-f698f84f9615"
|
620 |
+
},
|
621 |
+
"outputs": [
|
622 |
+
{
|
623 |
+
"name": "stdout",
|
624 |
+
"output_type": "stream",
|
625 |
+
"text": [
|
626 |
+
"Collecting ultralytics\n",
|
627 |
+
" Downloading ultralytics-8.0.214-py3-none-any.whl (645 kB)\n",
|
628 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m645.5/645.5 kB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
629 |
+
"\u001b[?25hRequirement already satisfied: matplotlib>=3.3.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (3.7.1)\n",
|
630 |
+
"Requirement already satisfied: numpy>=1.22.2 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (1.23.5)\n",
|
631 |
+
"Requirement already satisfied: opencv-python>=4.6.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (4.8.0.76)\n",
|
632 |
+
"Requirement already satisfied: pillow>=7.1.2 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (9.4.0)\n",
|
633 |
+
"Requirement already satisfied: pyyaml>=5.3.1 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (6.0.1)\n",
|
634 |
+
"Requirement already satisfied: requests>=2.23.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (2.31.0)\n",
|
635 |
+
"Requirement already satisfied: scipy>=1.4.1 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (1.11.3)\n",
|
636 |
+
"Requirement already satisfied: torch>=1.8.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (2.1.0+cu118)\n",
|
637 |
+
"Requirement already satisfied: torchvision>=0.9.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (0.16.0+cu118)\n",
|
638 |
+
"Requirement already satisfied: tqdm>=4.64.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (4.66.1)\n",
|
639 |
+
"Requirement already satisfied: pandas>=1.1.4 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (1.5.3)\n",
|
640 |
+
"Requirement already satisfied: seaborn>=0.11.0 in /usr/local/lib/python3.10/dist-packages (from ultralytics) (0.12.2)\n",
|
641 |
+
"Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from ultralytics) (5.9.5)\n",
|
642 |
+
"Requirement already satisfied: py-cpuinfo in /usr/local/lib/python3.10/dist-packages (from ultralytics) (9.0.0)\n",
|
643 |
+
"Collecting thop>=0.1.1 (from ultralytics)\n",
|
644 |
+
" Downloading thop-0.1.1.post2209072238-py3-none-any.whl (15 kB)\n",
|
645 |
+
"Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics) (1.2.0)\n",
|
646 |
+
"Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics) (0.12.1)\n",
|
647 |
+
"Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics) (4.44.3)\n",
|
648 |
+
"Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics) (1.4.5)\n",
|
649 |
+
"Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics) (23.2)\n",
|
650 |
+
"Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics) (3.1.1)\n",
|
651 |
+
"Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=3.3.0->ultralytics) (2.8.2)\n",
|
652 |
+
"Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas>=1.1.4->ultralytics) (2023.3.post1)\n",
|
653 |
+
"Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.23.0->ultralytics) (3.3.2)\n",
|
654 |
+
"Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.23.0->ultralytics) (3.4)\n",
|
655 |
+
"Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.23.0->ultralytics) (2.0.7)\n",
|
656 |
+
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.23.0->ultralytics) (2023.7.22)\n",
|
657 |
+
"Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch>=1.8.0->ultralytics) (3.13.1)\n",
|
658 |
+
"Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch>=1.8.0->ultralytics) (4.5.0)\n",
|
659 |
+
"Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch>=1.8.0->ultralytics) (1.12)\n",
|
660 |
+
"Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch>=1.8.0->ultralytics) (3.2.1)\n",
|
661 |
+
"Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=1.8.0->ultralytics) (3.1.2)\n",
|
662 |
+
"Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch>=1.8.0->ultralytics) (2023.6.0)\n",
|
663 |
+
"Requirement already satisfied: triton==2.1.0 in /usr/local/lib/python3.10/dist-packages (from torch>=1.8.0->ultralytics) (2.1.0)\n",
|
664 |
+
"Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib>=3.3.0->ultralytics) (1.16.0)\n",
|
665 |
+
"Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch>=1.8.0->ultralytics) (2.1.3)\n",
|
666 |
+
"Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.8.0->ultralytics) (1.3.0)\n",
|
667 |
+
"Installing collected packages: thop, ultralytics\n",
|
668 |
+
"Successfully installed thop-0.1.1.post2209072238 ultralytics-8.0.214\n"
|
669 |
+
]
|
670 |
+
}
|
671 |
+
],
|
672 |
+
"source": [
|
673 |
+
"!pip install ultralytics"
|
674 |
+
]
|
675 |
+
}
|
676 |
+
],
|
677 |
+
"metadata": {
|
678 |
+
"accelerator": "GPU",
|
679 |
+
"colab": {
|
680 |
+
"gpuType": "T4",
|
681 |
+
"provenance": []
|
682 |
+
},
|
683 |
+
"kernelspec": {
|
684 |
+
"display_name": "Python 3 (ipykernel)",
|
685 |
+
"language": "python",
|
686 |
+
"name": "python3"
|
687 |
+
},
|
688 |
+
"language_info": {
|
689 |
+
"codemirror_mode": {
|
690 |
+
"name": "ipython",
|
691 |
+
"version": 3
|
692 |
+
},
|
693 |
+
"file_extension": ".py",
|
694 |
+
"mimetype": "text/x-python",
|
695 |
+
"name": "python",
|
696 |
+
"nbconvert_exporter": "python",
|
697 |
+
"pygments_lexer": "ipython3",
|
698 |
+
"version": "3.8.18"
|
699 |
+
}
|
700 |
+
},
|
701 |
+
"nbformat": 4,
|
702 |
+
"nbformat_minor": 4
|
703 |
+
}
|
Spark_Deployment_Final.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
|
|
YOLOv8 Best We.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b52983e5d674161bfd0a252daf4dfc9d3371e9ad09b66bd42e9a22cb25a4b809
|
3 |
+
size 6268825
|
__MACOSX/._example 3 copy.mp4
ADDED
Binary file (326 Bytes). View file
|
|
__MACOSX/._example 3.mp4
ADDED
Binary file (326 Bytes). View file
|
|
app.py
ADDED
@@ -0,0 +1,277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import whisper
|
2 |
+
import cv2
|
3 |
+
import os
|
4 |
+
import urllib.request
|
5 |
+
from PIL import Image
|
6 |
+
from ultralytics import YOLO
|
7 |
+
import torch
|
8 |
+
import matplotlib.pyplot as plt
|
9 |
+
from tqdm import tqdm
|
10 |
+
from transformers import pipeline
|
11 |
+
import moviepy.editor as mp
|
12 |
+
import json
|
13 |
+
import re
|
14 |
+
import gradio as gr
|
15 |
+
from openai import OpenAI
|
16 |
+
|
17 |
+
from langchain.llms import OpenAI
|
18 |
+
from langchain.prompts import PromptTemplate
|
19 |
+
from langchain.chains import LLMChain
|
20 |
+
|
21 |
+
from langchain.chat_models import ChatOpenAI
|
22 |
+
from langchain.prompts import HumanMessagePromptTemplate
|
23 |
+
from langchain.schema.messages import SystemMessage
|
24 |
+
from langchain.prompts import ChatPromptTemplate
|
25 |
+
|
26 |
+
def video_transcription(video_path):
    """Transcribe the audio track of a video file with Whisper.

    Loads the 'medium' Whisper model, transcribes ``video_path`` with
    English forced as the language, echoes the raw result to stdout,
    and returns the whole transcript structure serialized as JSON.

    Args:
        video_path: Path to the video (or audio) file to transcribe.

    Returns:
        A JSON string of Whisper's transcript dict (includes 'segments'
        with per-segment 'text', 'start' and 'end' fields).
    """
    whisper_model = whisper.load_model('medium')
    result = whisper_model.transcribe(video_path, verbose=True, language='en')
    print(result)
    return json.dumps(result)
+
def action_detection(json_object, openai_key):
|
34 |
+
transcript = json.loads(json_object)
|
35 |
+
transcript_string = ''
|
36 |
+
for segments in transcript['segments']:
|
37 |
+
transcript_string+=str(segments['text']+'\n')
|
38 |
+
|
39 |
+
chunks = []
|
40 |
+
output = {}
|
41 |
+
count = 0
|
42 |
+
split_transcript = transcript_string.split("\n")
|
43 |
+
num_lines = len(split_transcript)
|
44 |
+
num_chars = 0
|
45 |
+
i = 0
|
46 |
+
prev = 0
|
47 |
+
|
48 |
+
while i < num_lines:
|
49 |
+
num_chars+=len(split_transcript[i])
|
50 |
+
if num_chars>=16000:
|
51 |
+
chunks.append("\n".join(split_transcript[prev:i]))
|
52 |
+
prev = i
|
53 |
+
num_chars = 0
|
54 |
+
i+=1
|
55 |
+
if i == num_lines:
|
56 |
+
chunks.append("\n".join(split_transcript[prev:i]))
|
57 |
+
|
58 |
+
# client = OpenAI(api_key = openai_key)
|
59 |
+
llm = OpenAI(openai_api_key=openai_key, model="gpt-4")
|
60 |
+
chat_template = ChatPromptTemplate.from_messages(
|
61 |
+
[
|
62 |
+
SystemMessage(
|
63 |
+
content=(
|
64 |
+
"You are an AI system specialized in detecting planning issues, critiquing plans, and analyzing conversations between police officers regarding how to disperse."
|
65 |
+
"Additionally, identify any instances suggesting 1st Amendment violations, criticizing the lack of a plan, and aggressive comments. Transcript:\n\n{transcript_}\n\n."
|
66 |
+
"Give response only in the json format for example: \{\"1\": \"What should we do now. I don't have a clue?\", \"2\": \"what the fuck is this\", \"3\":\"Beat the fuck out of them\"\}."
|
67 |
+
"There can be multiple instances, find out all of them. If you do not find anything just return {\"None\":\"None\"}"
|
68 |
+
)
|
69 |
+
),
|
70 |
+
HumanMessagePromptTemplate.from_template("{transcript_}"),
|
71 |
+
]
|
72 |
+
)
|
73 |
+
|
74 |
+
|
75 |
+
for i in chunks:
|
76 |
+
prompt = PromptTemplate.from_template(
|
77 |
+
"You are an AI system specialized in detecting planning issues, critiquing plans, and analyzing conversations between police officers regarding how to disperse. Additionally, identify any instances suggesting 1st Amendment violations, criticizing the lack of a plan, and aggressive comments. Transcript:\n\n{i}\n\n. Give response only in the json format for example: \{\"1\": \"What should we do now. I don't have a clue?\", \"2\": \"what the fuck is this\", \"3\":\"Beat the fuck out of them\"\}. There can be multiple instances, find out all of them. If you do not find anything just return {\"None\":\"None\"}"
|
78 |
+
)
|
79 |
+
|
80 |
+
llm = ChatOpenAI(openai_api_key=openai_key)
|
81 |
+
p = chat_template.format_messages(transcript_=i)
|
82 |
+
gpt_output = llm(p).content
|
83 |
+
|
84 |
+
# print(gpt_output)
|
85 |
+
# gpt_output = completion.choices[0].message.content
|
86 |
+
# print(gpt_output)
|
87 |
+
|
88 |
+
|
89 |
+
|
90 |
+
|
91 |
+
gpt_output = dict(json.loads(gpt_output))
|
92 |
+
for j in gpt_output.values():
|
93 |
+
output[count] = j
|
94 |
+
count+=1
|
95 |
+
|
96 |
+
sent_with_time = []
|
97 |
+
|
98 |
+
for sentence_to_search in output.values():
|
99 |
+
pattern = re.compile(re.escape(sentence_to_search), re.IGNORECASE)
|
100 |
+
|
101 |
+
matching_entries = [entry for entry in transcript['segments'] if re.search(pattern, entry['text'])]
|
102 |
+
|
103 |
+
if matching_entries:
|
104 |
+
for entry in matching_entries:
|
105 |
+
hours_s, remainder = divmod(entry['start'], 3600)
|
106 |
+
minutes_s, seconds_s = divmod(remainder, 60)
|
107 |
+
hours_s = str(int(hours_s)).zfill(2)
|
108 |
+
minutes_s = str(int(minutes_s)).zfill(2)
|
109 |
+
seconds_s = str(int(seconds_s)).zfill(2)
|
110 |
+
|
111 |
+
|
112 |
+
hours_e, remainder = divmod(entry['end'], 3600)
|
113 |
+
minutes_e, seconds_e = divmod(remainder, 60)
|
114 |
+
hours_e = str(int(hours_e)).zfill(2)
|
115 |
+
minutes_e = str(int(minutes_e)).zfill(2)
|
116 |
+
seconds_e = str(int(seconds_e)).zfill(2)
|
117 |
+
|
118 |
+
sent_with_time.append(sentence_to_search + ' Start Time: ' + str(hours_s) + ":" + str(minutes_s) + ":" + str(seconds_s) + ' End Time: ' + str(hours_e) + ":" + str(minutes_e) + ":" + str(seconds_e))
|
119 |
+
|
120 |
+
return "\n".join(sent_with_time)
|
121 |
+
|
122 |
+
def process_video(video_path, weights):
|
123 |
+
try:
|
124 |
+
# This code cell detects batons in the video
|
125 |
+
current_frame = 0
|
126 |
+
model = YOLO(weights)
|
127 |
+
cap = cv2.VideoCapture(video_path)
|
128 |
+
fps = int(cap.get(cv2.CAP_PROP_FPS))
|
129 |
+
conseq_frames = 0
|
130 |
+
start_time = ""
|
131 |
+
end_time = ""
|
132 |
+
res = []
|
133 |
+
|
134 |
+
while True:
|
135 |
+
ret, frame = cap.read()
|
136 |
+
if not ret:
|
137 |
+
break
|
138 |
+
|
139 |
+
# Detecting baton on one frame per second
|
140 |
+
if current_frame % fps == 0:
|
141 |
+
currect_sec = current_frame/fps
|
142 |
+
|
143 |
+
# Model prediction on current frame
|
144 |
+
results = model(frame, verbose = False)
|
145 |
+
count = 0
|
146 |
+
classes = results[0].boxes.data
|
147 |
+
|
148 |
+
# Formatting the time for printing
|
149 |
+
hours, remainder = divmod(currect_sec, 3600)
|
150 |
+
minutes, seconds = divmod(remainder, 60)
|
151 |
+
hours = str(int(hours)).zfill(2)
|
152 |
+
minutes = str(int(minutes)).zfill(2)
|
153 |
+
seconds = str(int(seconds)).zfill(2)
|
154 |
+
|
155 |
+
for i in classes:
|
156 |
+
|
157 |
+
# Checking if baton is detected (i.e. if the class corresponding to baton is 1 or not)
|
158 |
+
if float(i[5]) == 1:
|
159 |
+
count+=1
|
160 |
+
|
161 |
+
# Marking the start_time if this is the first consecutive frame a baton is detected in
|
162 |
+
if count >= 1:
|
163 |
+
conseq_frames+=1
|
164 |
+
if conseq_frames == 1:
|
165 |
+
start_time = hours + ":" + minutes + ":" + seconds
|
166 |
+
|
167 |
+
# Marking the end time if after one or multiple consecutive frames of detection, a baton is not detected
|
168 |
+
else:
|
169 |
+
if conseq_frames > 0:
|
170 |
+
conseq_frames = 0
|
171 |
+
end_time = hours + ":" + minutes + ":" + seconds
|
172 |
+
|
173 |
+
# Printing time intervals in which baton was detected
|
174 |
+
res.append(start_time + " to " + end_time)
|
175 |
+
start_time = ""
|
176 |
+
end_time = ""
|
177 |
+
|
178 |
+
current_frame += 1
|
179 |
+
cap.release()
|
180 |
+
|
181 |
+
return "\n".join(res)
|
182 |
+
|
183 |
+
except Exception as e:
|
184 |
+
|
185 |
+
return e
|
186 |
+
|
187 |
+
# def all_funcs(openai_key,video_path, yolo_weights, pr = gr.Progress(track_tqdm = True)):
|
188 |
+
|
189 |
+
|
190 |
+
# video_path = video_path[0].split('/')[-1]
|
191 |
+
# yolo_weights = yolo_weights[0].split('/')[-1]
|
192 |
+
# transcript = video_transcription(video_path)
|
193 |
+
# sentences = action_detection(transcript, openai_key)
|
194 |
+
# batons = process_video(video_path, yolo_weights)
|
195 |
+
|
196 |
+
# print("ALL FUNC Executed without errors")
|
197 |
+
|
198 |
+
# return sentences, batons
|
199 |
+
|
200 |
+
import zipfile
|
201 |
+
import smtplib
|
202 |
+
import ssl
|
203 |
+
from email.message import EmailMessage
|
204 |
+
|
205 |
+
def all_funcs(openai_key, zip_path, yolo_weights, email, pr = gr.Progress(track_tqdm = True)):
    """Process every video in an uploaded zip archive and e-mail results.

    Each extracted video is transcribed (Whisper), scanned for flagged
    speech (GPT), and scanned for batons (YOLOv8); all timestamps are
    then mailed to ``email``.

    Args:
        openai_key: OpenAI API key for the transcript analysis.
        zip_path: Gradio Files payload for the uploaded zip archive.
        yolo_weights: Gradio Files payload for the YOLOv8 weights file.
        email: Recipient address for the results e-mail.
        pr: Gradio progress tracker mirroring tqdm progress in the UI.

    Returns:
        (sentences, batons): dicts keyed by "Video N" holding the speech
        timestamps and baton-detection intervals respectively.
    """
    sentences = {}
    batons = {}
    count = 1

    print(zip_path)
    # NOTE(review): taking only the basename assumes the uploaded archive
    # is reachable from the working directory — confirm against Gradio's
    # upload handling before changing.
    with zipfile.ZipFile(zip_path[0].split("/")[-1], "r") as zip_ref:
        for filename in zip_ref.namelist():
            zip_ref.extract(filename)
            video_path = filename
            print(video_path)

            print(yolo_weights)
            try:
                transcript = video_transcription(video_path)
                print(transcript)
                video_name = "Video " + str(count)
                sentences[video_name] = action_detection(transcript, openai_key)
                print(sentences[video_name])
                batons[video_name] = process_video(video_path, yolo_weights[0])
                print("batons ", batons)
                count += 1
            except Exception as e:
                # Best-effort: skip videos that fail and keep processing.
                print(e)
                continue

    email_sender = 'bodycam1211@gmail.com'
    # SECURITY FIX: the Gmail app password was hard-coded (and leaked) in
    # source control. Read it from the environment instead; the exposed
    # credential must be revoked.
    email_password = os.environ.get('EMAIL_PASSWORD', '')
    email_receiver = email

    # Set the subject and body of the email
    subject = 'Timestamps Detection Complete'

    # Build the report. Bug fix: the original prepended a stray "-" to the
    # whole accumulated result on every iteration; str() guards against
    # non-string values (e.g. an error message object) in the dicts.
    result = ""
    for i in sentences.keys():
        result = result + i + "\n"
        result = result + str(sentences[i]) + "\n"
        result = result + "Batons time stamp: " + "\n"
        result = result + str(batons[i]) + "\n\n"

    body = "Here are the results of your detected timestamps:\n" + result

    em = EmailMessage()
    em['From'] = email_sender
    em['To'] = email_receiver
    em['Subject'] = subject
    em.set_content(body)

    # Add SSL (layer of security)
    context = ssl.create_default_context()

    # Log in and send the email
    with smtplib.SMTP_SSL('smtp.gmail.com', 465, context=context) as smtp:
        smtp.login(email_sender, email_password)
        smtp.sendmail(email_sender, email_receiver, em.as_string())

    print("ALL FUNC Executed without errors")

    return sentences, batons
269 |
+
|
270 |
+
|
271 |
+
# Build the Gradio UI: an API key, a zip of videos, YOLOv8 weights and a
# destination e-mail go in; audio-analysis and baton timestamps come out.
btn = gr.Interface(
    fn=all_funcs,
    inputs=[
        "text",
        gr.Files(label="Select Zip File"),
        gr.Files(label="Select YOLOv8 Weights File"),
        "text",
    ],
    outputs=[
        gr.Textbox(label="Audio Analysis Time Stamps", lines=20),
        gr.Textbox(label="Baton Detection Timestamps", lines=20),
    ],
)

# Serve on all interfaces, port 4000 (container deployment).
btn.launch(server_name="0.0.0.0", server_port=4000)
|
requirements.txt
ADDED
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
aiofiles==23.2.1
|
2 |
+
aiohttp==3.9.1
|
3 |
+
aiosignal==1.3.1
|
4 |
+
altair==5.1.2
|
5 |
+
annotated-types==0.6.0
|
6 |
+
anyio==3.7.1
|
7 |
+
appnope==0.1.3
|
8 |
+
asttokens==2.4.1
|
9 |
+
async-timeout==4.0.3
|
10 |
+
attrs==23.1.0
|
11 |
+
certifi==2023.11.17
|
12 |
+
charset-normalizer==3.3.2
|
13 |
+
click==8.1.7
|
14 |
+
colorama==0.4.6
|
15 |
+
comm==0.2.0
|
16 |
+
contourpy==1.2.0
|
17 |
+
cycler==0.12.1
|
18 |
+
Cython==3.0.6
|
19 |
+
dataclasses-json==0.6.3
|
20 |
+
debugpy==1.8.0
|
21 |
+
decorator==4.4.2
|
22 |
+
distro==1.8.0
|
23 |
+
exceptiongroup==1.2.0
|
24 |
+
executing==2.0.1
|
25 |
+
fastapi==0.104.1
|
26 |
+
ffmpy==0.3.1
|
27 |
+
filelock==3.13.1
|
28 |
+
fonttools==4.45.1
|
29 |
+
frozenlist==1.4.0
|
30 |
+
fsspec==2023.10.0
|
31 |
+
gradio==4.7.1
|
32 |
+
gradio_client==0.7.0
|
33 |
+
h11==0.14.0
|
34 |
+
httpcore==1.0.2
|
35 |
+
httpx==0.25.2
|
36 |
+
huggingface-hub==0.19.4
|
37 |
+
idna==3.6
|
38 |
+
imageio==2.33.0
|
39 |
+
imageio-ffmpeg==0.4.9
|
40 |
+
importlib-resources==6.1.1
|
41 |
+
ipykernel==6.27.1
|
42 |
+
ipython==8.18.1
|
43 |
+
jedi==0.19.1
|
44 |
+
Jinja2==3.1.2
|
45 |
+
jsonpatch==1.33
|
46 |
+
jsonpointer==2.4
|
47 |
+
jsonschema==4.20.0
|
48 |
+
jsonschema-specifications==2023.11.1
|
49 |
+
jupyter_client==8.6.0
|
50 |
+
jupyter_core==5.5.0
|
51 |
+
kiwisolver==1.4.5
|
52 |
+
langchain==0.0.345
|
53 |
+
langchain-core==0.0.9
|
54 |
+
langsmith==0.0.69
|
55 |
+
llvmlite==0.41.1
|
56 |
+
markdown-it-py==3.0.0
|
57 |
+
MarkupSafe==2.1.3
|
58 |
+
marshmallow==3.20.1
|
59 |
+
matplotlib==3.8.2
|
60 |
+
matplotlib-inline==0.1.6
|
61 |
+
mdurl==0.1.2
|
62 |
+
more-itertools==10.1.0
|
63 |
+
moviepy==1.0.3
|
64 |
+
mpmath==1.3.0
|
65 |
+
multidict==6.0.4
|
66 |
+
mypy-extensions==1.0.0
|
67 |
+
nest-asyncio==1.5.8
|
68 |
+
networkx==3.2.1
|
69 |
+
numba==0.58.1
|
70 |
+
numpy==1.26.2
|
71 |
+
openai==1.3.5
|
72 |
+
openai-whisper==20231117
|
73 |
+
opencv-python==4.8.1.78
|
74 |
+
orjson==3.9.10
|
75 |
+
packaging==23.2
|
76 |
+
pandas==2.1.3
|
77 |
+
parso==0.8.3
|
78 |
+
pexpect==4.9.0
|
79 |
+
Pillow==10.1.0
|
80 |
+
platformdirs==4.0.0
|
81 |
+
proglog==0.1.10
|
82 |
+
prompt-toolkit==3.0.41
|
83 |
+
psutil==5.9.6
|
84 |
+
ptyprocess==0.7.0
|
85 |
+
pure-eval==0.2.2
|
86 |
+
py-cpuinfo==9.0.0
|
87 |
+
pydantic==2.5.2
|
88 |
+
pydantic_core==2.14.5
|
89 |
+
pydub==0.25.1
|
90 |
+
Pygments==2.17.2
|
91 |
+
pyparsing==3.1.1
|
92 |
+
python-dateutil==2.8.2
|
93 |
+
python-multipart==0.0.6
|
94 |
+
pytz==2023.3.post1
|
95 |
+
PyYAML==6.0.1
|
96 |
+
pyzmq==25.1.1
|
97 |
+
referencing==0.31.0
|
98 |
+
regex==2023.10.3
|
99 |
+
requests==2.31.0
|
100 |
+
rich==13.7.0
|
101 |
+
rpds-py==0.13.1
|
102 |
+
safetensors==0.4.1
|
103 |
+
scipy==1.11.4
|
104 |
+
seaborn==0.13.0
|
105 |
+
semantic-version==2.10.0
|
106 |
+
shellingham==1.5.4
|
107 |
+
six==1.16.0
|
108 |
+
sniffio==1.3.0
|
109 |
+
SQLAlchemy==2.0.23
|
110 |
+
stack-data==0.6.3
|
111 |
+
starlette==0.27.0
|
112 |
+
sympy==1.12
|
113 |
+
tenacity==8.2.3
|
114 |
+
thop==0.1.1.post2209072238
|
115 |
+
tiktoken==0.5.1
|
116 |
+
tokenizers==0.15.0
|
117 |
+
tomlkit==0.12.0
|
118 |
+
toolz==0.12.0
|
119 |
+
torch==2.1.1
|
120 |
+
torchvision==0.16.1
|
121 |
+
tornado==6.3.3
|
122 |
+
tqdm==4.66.1
|
123 |
+
traitlets==5.14.0
|
124 |
+
transformers==4.35.2
|
125 |
+
typer==0.9.0
|
126 |
+
typing-inspect==0.9.0
|
127 |
+
typing_extensions==4.8.0
|
128 |
+
tzdata==2023.3
|
129 |
+
ultralytics==8.0.219
|
130 |
+
urllib3==2.1.0
|
131 |
+
uvicorn==0.24.0.post1
|
132 |
+
wcwidth==0.2.12
|
133 |
+
websockets==11.0.3
|
134 |
+
yarl==1.9.3
|
test.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:9fbe815458a448cfea56c5d51115788d4e34374a2ed2b3abd974dcc041ca49c3
|
3 |
+
size 18518382
|
zip_file_with_email.py
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import smtplib
import ssl
import zipfile
from email.message import EmailMessage

# Required: ``gr.Progress`` is referenced in all_funcs' signature; without
# this import the module raises NameError at import time.
import gradio as gr
|
5 |
+
|
6 |
+
def all_funcs(openai_key, zip_path, yolo_weights, email, pr = gr.Progress(track_tqdm = True)):
    """Run the detection pipeline on every video in a zip, then email the results.

    For each file extracted from ``zip_path`` the pipeline runs speech
    transcription (``video_transcription``), timestamp/action detection via
    OpenAI (``action_detection``), and baton detection with a YOLO model
    (``process_video``).  The combined per-video results are emailed to
    ``email`` and also returned.

    Args:
        openai_key: OpenAI API key forwarded to ``action_detection``.
        zip_path: Path to a zip archive containing the video files.
        yolo_weights: Path to the YOLO weights file; a Gradio file input
            may also deliver it as a one-element list.
        email: Recipient address for the results email.
        pr: Gradio progress tracker (unused in the body; kept so the UI
            can attach tqdm-based progress).

    Returns:
        Tuple ``(sentences, batons)`` — two dicts mapping ``"Video N"`` to
        the detection text produced for that video.
    """
    sentences = {}
    batons = {}
    count = 1

    # Resolve the weights basename ONCE, outside the loop.  The original
    # code re-assigned ``yolo_weights`` inside the loop, so from the second
    # video onward it sliced the first character of the basename.
    if isinstance(yolo_weights, (list, tuple)):
        # Gradio file components may hand over a list; take the first entry.
        # TODO(review): confirm the component's actual return type.
        yolo_weights = yolo_weights[0]
    yolo_weights = yolo_weights.split('/')[-1]

    with zipfile.ZipFile(zip_path, "r") as zip_ref:
        for filename in zip_ref.namelist():
            zip_ref.extract(filename)
            # ``namelist()`` yields plain strings; the original
            # ``filename[0].split('/')[-1]`` sliced the FIRST CHARACTER of
            # the name instead of taking its basename.
            video_path = filename.split('/')[-1]

            transcript = video_transcription(video_path)
            video_name = "Video " + str(count)
            sentences[video_name] = action_detection(transcript, openai_key)
            batons[video_name] = process_video(video_path, yolo_weights)
            count += 1

    # SECURITY: credentials are hard-coded in source and committed to the
    # repo.  Move them to an environment variable / secret store and rotate
    # this Gmail app password.
    email_sender = 'bodycam1211@gmail.com'
    email_password = 'evmt luaz mgoi iapl'
    email_receiver = email

    # Set the subject and body of the email.
    subject = 'Timestamps Detection Complete'

    # Assemble the per-video sections with join (linear, not quadratic +=).
    result = "".join(
        name + "\n" + sentences[name] + "\n" + batons[name] + "\n\n"
        for name in sentences
    )

    body = "Here are the results of your detected timestamps:\n" + result

    em = EmailMessage()
    em['From'] = email_sender
    em['To'] = email_receiver
    em['Subject'] = subject
    em.set_content(body)

    # Default SSL context (verifies the server certificate).
    context = ssl.create_default_context()

    # Log in and send the email over implicit TLS (port 465).
    with smtplib.SMTP_SSL('smtp.gmail.com', 465, context=context) as smtp:
        smtp.login(email_sender, email_password)
        smtp.sendmail(email_sender, email_receiver, em.as_string())

    print("ALL FUNC Executed without errors")

    return sentences, batons