Spaces: Runtime error
init: project
- .DS_Store +0 -0
- app.py +34 -0
- requirements.txt +2 -0
- tokenizer/.DS_Store +0 -0
- tokenizer/merges.txt +0 -0
- tokenizer/preprocessor_config.json +19 -0
- tokenizer/special_tokens_map.json +1 -0
- tokenizer/tokenizer.json +0 -0
- tokenizer/tokenizer_config.json +1 -0
- tokenizer/vocab.json +0 -0
.DS_Store
ADDED
Binary file (6.15 kB)
app.py
ADDED
@@ -0,0 +1,34 @@
from PIL import Image
import gradio as gr
from transformers import CLIPProcessor, CLIPModel

# Model weights come from the Hub; the processor (tokenizer + image
# preprocessing) is loaded from this repo's local tokenizer/ directory.
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("tokenizer")

def generate_answer(image):
    # Short labels shown in the result, aligned one-to-one with the prompts below.
    labels = ['high_natural_light', 'neutral_natural_light', 'no_light',
              'artificial_lights', 'others']
    # Full text prompts that CLIP scores against the image (zero-shot classification).
    prompts = [
        'A picture of a living room filled with abundant natural light, with many windows and no objects blocking the light, regardless of whether it is night',
        'A picture of a living room with neutral natural light, with few windows and no objects blocking the light, regardless of whether it is night',
        'A picture of a room with no lights',
        'A picture of a living room with artificial lights such as lamps or ceiling lights',
        'Objects or things that are not a room',
    ]

    inputs = processor(text=prompts, images=image, return_tensors="pt", padding=True)
    outputs = model(**inputs)
    logits_per_image = outputs.logits_per_image  # image-text similarity scores
    probs = logits_per_image.softmax(dim=1)      # normalize scores to probabilities

    probabilities_list = probs.squeeze().tolist()

    return {label: probability for label, probability in zip(labels, probabilities_list)}

image_input = gr.Image(type="pil", label="Upload Image")

iface = gr.Interface(
    fn=generate_answer,
    inputs=[image_input],
    outputs="text",
    title="Room Lighting Score",
    description="Upload an image of a room to score its lighting",
)

iface.launch()
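For a quick check outside the Gradio UI, the classifier can be called directly. A minimal sketch, assuming the definitions above are already in scope (e.g. in the same Python session, before iface.launch()); "room.jpg" is a hypothetical local file used for illustration:

from PIL import Image

# A minimal sketch, not part of the commit: call the classifier directly.
# "room.jpg" is a hypothetical local file.
scores = generate_answer(Image.open("room.jpg"))
print(max(scores, key=scores.get))  # label with the highest probability
print(scores)                       # full label -> probability mapping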
requirements.txt
ADDED
@@ -0,0 +1,2 @@
transformers==4.37.2
accelerate
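Note that app.py also imports gradio and PIL and needs torch for the model's forward pass, none of which are pinned here. On a Gradio Space the SDK image provides gradio (and Pillow with it), but an unpinned torch is one hedged guess at the "Runtime error" status above; that is an assumption, not something confirmed from logs. A minimal sketch for checking which of the app's dependencies actually resolve:

import importlib

# A minimal sketch, not part of the commit: verify that every module app.py
# relies on is importable in the current environment.
for module in ("transformers", "accelerate", "torch", "gradio", "PIL"):
    try:
        importlib.import_module(module)
        print(f"{module}: ok")
    except ImportError as err:
        print(f"{module}: missing ({err})")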
tokenizer/.DS_Store
ADDED
Binary file (6.15 kB)
tokenizer/merges.txt
ADDED
The diff for this file is too large to render.
tokenizer/preprocessor_config.json
ADDED
@@ -0,0 +1,19 @@
{
  "crop_size": 224,
  "do_center_crop": true,
  "do_normalize": true,
  "do_resize": true,
  "feature_extractor_type": "CLIPFeatureExtractor",
  "image_mean": [
    0.48145466,
    0.4578275,
    0.40821073
  ],
  "image_std": [
    0.26862954,
    0.26130258,
    0.27577711
  ],
  "resample": 3,
  "size": 224
}
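These are the stock CLIP ViT-B/32 preprocessing settings: resize to 224, center-crop to 224x224, bicubic resampling (resample 3), and CLIP's RGB normalization constants. A minimal sketch of loading the committed config from the local directory, using CLIPImageProcessor (the current transformers name for the CLIPFeatureExtractor type recorded above):

from PIL import Image
from transformers import CLIPImageProcessor

# A minimal sketch, not part of the commit: load preprocessor_config.json
# from the local tokenizer/ directory and preprocess one dummy image.
image_processor = CLIPImageProcessor.from_pretrained("tokenizer")
dummy = Image.new("RGB", (640, 480))
pixel_values = image_processor(images=dummy, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 224, 224])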
tokenizer/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}
tokenizer/tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": "<|endoftext|>", "add_prefix_space": false, "errors": "replace", "do_lower_case": true, "name_or_path": "./clip_ViT_B_32/", "model_max_length": 77}
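Together with vocab.json and merges.txt, these two files make the tokenizer/ directory self-contained: a lowercasing byte-pair-encoding tokenizer with <|startoftext|>/<|endoftext|> special tokens and CLIP's 77-token context limit. A minimal sketch of loading it directly:

from transformers import CLIPTokenizer

# A minimal sketch, not part of the commit: load the tokenizer from the
# local tokenizer/ directory committed above.
tokenizer = CLIPTokenizer.from_pretrained("tokenizer")
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.model_max_length)
# expected: <|startoftext|> <|endoftext|> 77
encoded = tokenizer(["A picture of a living room"], padding=True, return_tensors="pt")
print(encoded.input_ids)  # starts with the BOS id, ends with the EOS id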
tokenizer/vocab.json
ADDED
The diff for this file is too large to render.