Spaces: Runtime error
Ubuntu committed
Commit · fdca025
1 Parent(s): 033a668
- .ipynb_checkpoints/README-checkpoint.md +13 -0
- .ipynb_checkpoints/app-checkpoint.py +37 -37
- app.py +37 -37
.ipynb_checkpoints/README-checkpoint.md ADDED
@@ -0,0 +1,13 @@
+---
+title: Custom Object Detection
+emoji: 🏃
+colorFrom: yellow
+colorTo: yellow
+sdk: gradio
+sdk_version: 3.27.0
+app_file: app.py
+pinned: false
+license: cc
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
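The front matter declares a Gradio Space whose entry point is app.py. For orientation, a minimal app.py consistent with this configuration might look like the sketch below; the component wiring and the pass-through body are assumptions made for illustration, not the app shipped in this commit (that diff follows).

# Hypothetical minimal entry point for a Gradio Space (sdk_version 3.27.0).
# The committed app defines video_identity(video, user_name, class_name,
# trainortest, ready); only the interface wiring is sketched here.
import gradio as gr

def video_identity(video, user_name, class_name, trainortest, ready):
    # Placeholder body: the committed version trains or tests a ViT classifier.
    return video

demo = gr.Interface(
    fn=video_identity,
    inputs=[
        gr.Video(),
        gr.Textbox(label="user_name"),
        gr.Textbox(label="class_name"),
        gr.Radio(["train", "test"], label="trainortest"),
        gr.Radio(["yes", "no"], label="ready"),
    ],
    outputs=gr.Video(),
)

if __name__ == "__main__":
    demo.launch()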
.ipynb_checkpoints/app-checkpoint.py CHANGED
@@ -15,7 +15,40 @@ from PIL import Image
 import PIL
 
 HF_DATASETS_CACHE="./"
-
+class ImageClassificationCollator:
+    def __init__(self, feature_extractor):
+        self.feature_extractor = feature_extractor
+
+    def __call__(self, batch):
+        encodings = self.feature_extractor([x[0] for x in batch], return_tensors='pt')
+        encodings['labels'] = torch.tensor([x[1] for x in batch], dtype=torch.long)
+        return encodings
+class Classifier(pl.LightningModule):
+
+    def __init__(self, model, lr: float = 2e-5, **kwargs):
+        super().__init__()
+        self.save_hyperparameters('lr', *list(kwargs))
+        self.model = model
+        self.forward = self.model.forward
+        self.val_acc = Accuracy(
+            task='multiclass' if model.config.num_labels > 2 else 'binary',
+            num_classes=model.config.num_labels
+        )
+
+    def training_step(self, batch, batch_idx):
+        outputs = self(**batch)
+        self.log(f"train_loss", outputs.loss)
+        return outputs.loss
+
+    def validation_step(self, batch, batch_idx):
+        outputs = self(**batch)
+        self.log(f"val_loss", outputs.loss)
+        acc = self.val_acc(outputs.logits.argmax(1), batch['labels'])
+        self.log(f"val_acc", acc, prog_bar=True)
+        return outputs.loss
+
+    def configure_optimizers(self):
+        return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)
 
 def video_identity(video,user_name,class_name,trainortest,ready):
     if ready=='yes':
@@ -38,14 +71,7 @@ def video_identity(video,user_name,class_name,trainortest,ready):
     label2id[class_name] = str(i)
     id2label[str(i)] = class_name
 
-
-    def __init__(self, feature_extractor):
-        self.feature_extractor = feature_extractor
-
-    def __call__(self, batch):
-        encodings = self.feature_extractor([x[0] for x in batch], return_tensors='pt')
-        encodings['labels'] = torch.tensor([x[1] for x in batch], dtype=torch.long)
-        return encodings
+
     feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k')
     model = ViTForImageClassification.from_pretrained(
         'google/vit-base-patch16-224-in21k',
@@ -54,33 +80,7 @@ def video_identity(video,user_name,class_name,trainortest,ready):
     id2label=id2label
     )
     collator = ImageClassificationCollator(feature_extractor)
-
-
-    def __init__(self, model, lr: float = 2e-5, **kwargs):
-        super().__init__()
-        self.save_hyperparameters('lr', *list(kwargs))
-        self.model = model
-        self.forward = self.model.forward
-        self.val_acc = Accuracy(
-            task='multiclass' if model.config.num_labels > 2 else 'binary',
-            num_classes=model.config.num_labels
-        )
-
-    def training_step(self, batch, batch_idx):
-        outputs = self(**batch)
-        self.log(f"train_loss", outputs.loss)
-        return outputs.loss
-
-    def validation_step(self, batch, batch_idx):
-        outputs = self(**batch)
-        self.log(f"val_loss", outputs.loss)
-        acc = self.val_acc(outputs.logits.argmax(1), batch['labels'])
-        self.log(f"val_acc", acc, prog_bar=True)
-        return outputs.loss
-
-    def configure_optimizers(self):
-        return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)
-
+
 
 
     train_loader = DataLoader(train_ds, batch_size=2, collate_fn=collator, num_workers=8, shuffle=True)
@@ -94,7 +94,7 @@ def video_identity(video,user_name,class_name,trainortest,ready):
 
     pl.seed_everything(42)
     classifier = Classifier(model, lr=2e-5)
-    trainer = pl.Trainer(accelerator='
+    trainer = pl.Trainer(accelerator='cpu', devices=1, precision=16, max_epochs=3)
 
     trainer.fit(classifier, train_loader, test_loader)
 
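The change moves ImageClassificationCollator and the Lightning Classifier out of video_identity to module level, so both classes are defined once at import time instead of being re-declared (as orphaned methods) inside the function. As a minimal, self-contained sketch of how these pieces fit together, assuming a two-image dummy dataset and made-up class names in place of the app's user-supplied labels:

# Minimal end-to-end sketch of the collator + LightningModule pattern in
# this commit. The dummy dataset and the cat/dog labels are assumptions
# added so the example runs standalone on CPU.
import torch
import pytorch_lightning as pl
from PIL import Image
from torch.utils.data import DataLoader
from transformers import ViTFeatureExtractor, ViTForImageClassification

class ImageClassificationCollator:
    def __init__(self, feature_extractor):
        self.feature_extractor = feature_extractor

    def __call__(self, batch):
        # batch is a list of (PIL image, integer label) pairs
        encodings = self.feature_extractor([x[0] for x in batch], return_tensors='pt')
        encodings['labels'] = torch.tensor([x[1] for x in batch], dtype=torch.long)
        return encodings

class Classifier(pl.LightningModule):
    def __init__(self, model, lr: float = 2e-5):
        super().__init__()
        self.save_hyperparameters('lr')
        self.model = model
        self.forward = self.model.forward

    def training_step(self, batch, batch_idx):
        outputs = self(**batch)
        self.log("train_loss", outputs.loss)
        return outputs.loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)

label2id = {'cat': '0', 'dog': '1'}  # assumed; the app builds these from user input
id2label = {'0': 'cat', '1': 'dog'}

feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k')
model = ViTForImageClassification.from_pretrained(
    'google/vit-base-patch16-224-in21k',
    num_labels=2,
    label2id=label2id,
    id2label=id2label,
)

# Two blank RGB frames stand in for real frames extracted from the video.
train_ds = [(Image.new('RGB', (224, 224)), 0), (Image.new('RGB', (224, 224)), 1)]
train_loader = DataLoader(train_ds, batch_size=2,
                          collate_fn=ImageClassificationCollator(feature_extractor))

pl.seed_everything(42)
trainer = pl.Trainer(accelerator='cpu', devices=1, max_epochs=1)
trainer.fit(Classifier(model, lr=2e-5), train_loader)

Validation and the val_acc metric are trimmed here for brevity; the committed Classifier also logs val_loss and val_acc from a validation_step.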
app.py CHANGED
@@ -15,7 +15,40 @@ from PIL import Image
 import PIL
 
 HF_DATASETS_CACHE="./"
-
+class ImageClassificationCollator:
+    def __init__(self, feature_extractor):
+        self.feature_extractor = feature_extractor
+
+    def __call__(self, batch):
+        encodings = self.feature_extractor([x[0] for x in batch], return_tensors='pt')
+        encodings['labels'] = torch.tensor([x[1] for x in batch], dtype=torch.long)
+        return encodings
+class Classifier(pl.LightningModule):
+
+    def __init__(self, model, lr: float = 2e-5, **kwargs):
+        super().__init__()
+        self.save_hyperparameters('lr', *list(kwargs))
+        self.model = model
+        self.forward = self.model.forward
+        self.val_acc = Accuracy(
+            task='multiclass' if model.config.num_labels > 2 else 'binary',
+            num_classes=model.config.num_labels
+        )
+
+    def training_step(self, batch, batch_idx):
+        outputs = self(**batch)
+        self.log(f"train_loss", outputs.loss)
+        return outputs.loss
+
+    def validation_step(self, batch, batch_idx):
+        outputs = self(**batch)
+        self.log(f"val_loss", outputs.loss)
+        acc = self.val_acc(outputs.logits.argmax(1), batch['labels'])
+        self.log(f"val_acc", acc, prog_bar=True)
+        return outputs.loss
+
+    def configure_optimizers(self):
+        return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)
 
 def video_identity(video,user_name,class_name,trainortest,ready):
     if ready=='yes':
@@ -38,14 +71,7 @@ def video_identity(video,user_name,class_name,trainortest,ready):
     label2id[class_name] = str(i)
     id2label[str(i)] = class_name
 
-
-    def __init__(self, feature_extractor):
-        self.feature_extractor = feature_extractor
-
-    def __call__(self, batch):
-        encodings = self.feature_extractor([x[0] for x in batch], return_tensors='pt')
-        encodings['labels'] = torch.tensor([x[1] for x in batch], dtype=torch.long)
-        return encodings
+
     feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k')
     model = ViTForImageClassification.from_pretrained(
         'google/vit-base-patch16-224-in21k',
@@ -54,33 +80,7 @@ def video_identity(video,user_name,class_name,trainortest,ready):
     id2label=id2label
     )
     collator = ImageClassificationCollator(feature_extractor)
-
-
-    def __init__(self, model, lr: float = 2e-5, **kwargs):
-        super().__init__()
-        self.save_hyperparameters('lr', *list(kwargs))
-        self.model = model
-        self.forward = self.model.forward
-        self.val_acc = Accuracy(
-            task='multiclass' if model.config.num_labels > 2 else 'binary',
-            num_classes=model.config.num_labels
-        )
-
-    def training_step(self, batch, batch_idx):
-        outputs = self(**batch)
-        self.log(f"train_loss", outputs.loss)
-        return outputs.loss
-
-    def validation_step(self, batch, batch_idx):
-        outputs = self(**batch)
-        self.log(f"val_loss", outputs.loss)
-        acc = self.val_acc(outputs.logits.argmax(1), batch['labels'])
-        self.log(f"val_acc", acc, prog_bar=True)
-        return outputs.loss
-
-    def configure_optimizers(self):
-        return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)
-
+
 
 
     train_loader = DataLoader(train_ds, batch_size=2, collate_fn=collator, num_workers=8, shuffle=True)
@@ -94,7 +94,7 @@ def video_identity(video,user_name,class_name,trainortest,ready):
 
     pl.seed_everything(42)
     classifier = Classifier(model, lr=2e-5)
-    trainer = pl.Trainer(accelerator='
+    trainer = pl.Trainer(accelerator='cpu', devices=1, precision=16, max_epochs=3)
 
     trainer.fit(classifier, train_loader, test_loader)
 
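The final hunk in both files pins the Trainer to accelerator='cpu' while keeping precision=16. Mixed 16-bit precision is generally a GPU feature, and some PyTorch Lightning versions reject it outright on a CPU accelerator, which is one plausible source of this Space's runtime error. A device-agnostic construction is sketched below; the runtime GPU check is an assumption, not part of this commit.

# Hedged alternative to the hard-coded Trainer: choose the accelerator at
# runtime and only request 16-bit precision when a GPU is actually present.
import torch
import pytorch_lightning as pl

use_gpu = torch.cuda.is_available()
trainer = pl.Trainer(
    accelerator='gpu' if use_gpu else 'cpu',
    devices=1,
    precision=16 if use_gpu else 32,
    max_epochs=3,
)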