AZIIIIIIIIZ committed
Commit 26ac5ee
Parent: 981db78

Upload app.py

Files changed (1)
  1. app.py +136 -0
app.py ADDED
@@ -0,0 +1,136 @@
# --- Earlier, commented-out experiments kept for reference ---

# Attempt 1: load the model directly vs. via the high-level pipeline helper.
# import gradio as gr

# # Use a pipeline as a high-level helper
# from transformers import pipeline

# # Load model directly
# from transformers import AutoImageProcessor, AutoModelForImageClassification

# # processor = AutoImageProcessor.from_pretrained("AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")
# # model = AutoModelForImageClassification.from_pretrained("AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")
# pipe = pipeline("image-classification", model="AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")


# Attempt 2: helpers to accept a URL, local path, base64 string, or PIL image and classify it.
# # $ pip install gradio_client fastapi uvicorn

# import requests
# from PIL import Image
# from transformers import pipeline
# import io
# import base64

# # Initialize the pipeline
# pipe = pipeline('image-classification')

# def load_image_from_path(image_path):
#     return Image.open(image_path)

# def load_image_from_url(image_url):
#     response = requests.get(image_url)
#     return Image.open(io.BytesIO(response.content))

# def load_image_from_base64(base64_string):
#     image_data = base64.b64decode(base64_string)
#     return Image.open(io.BytesIO(image_data))

# def predict(image_input):
#     if isinstance(image_input, str):
#         if image_input.startswith('http'):
#             image = load_image_from_url(image_input)
#         elif image_input.startswith('/'):
#             image = load_image_from_path(image_input)
#         else:
#             image = load_image_from_base64(image_input)
#     elif isinstance(image_input, Image.Image):
#         image = image_input
#     else:
#         raise ValueError("Incorrect format used for image. Should be a URL linking to an image, a base64 string, a local path, or a PIL image.")
#     return pipe(image)

# Attempt 3: minimal Gradio interface around the pipeline.
# def predict(image):
#     return pipe(image)

# def main():
#     # image_input = 'path_or_url_or_base64'  # Update with actual input
#     # output = predict(image_input)
#     # print(output)

#     demo = gr.Interface(
#         fn=predict,
#         inputs='image',
#         outputs='text',
#     )

#     demo.launch()

# Attempt 4: plain PyTorch/torchvision inference; note that model, labels, and device are never defined in this file.
# import requests
# import torch
# from PIL import Image
# from torchvision import transforms

# def predict(inp):
#     inp = Image.fromarray(inp.astype("uint8"), "RGB")
#     inp = transforms.ToTensor()(inp).unsqueeze(0)
#     with torch.no_grad():
#         prediction = torch.nn.functional.softmax(model(inp.to(device))[0], dim=0)
#     return {labels[i]: float(prediction[i]) for i in range(1000)}


# inputs = gr.Image()
# outputs = gr.Label(num_top_classes=2)

# io = gr.Interface(
#     fn=predict, inputs=inputs, outputs=outputs, examples=["dog.jpg"]
# )
# io.launch(inline=False, share=True)


# Attempt 5: gr.Interface built with the legacy gr.inputs / gr.outputs namespace.
# import gradio as gr
# from transformers import pipeline

# pipeline = pipeline("image-classification", model="AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")

# def predict(image):
#     predictions = pipeline(image)
#     return {p["label"]: p["score"] for p in predictions}

# gr.Interface(
#     predict,
#     inputs=gr.inputs.Image(label="Upload Image", type="filepath"),
#     outputs=gr.outputs.Label(num_top_classes=2),
#     title="AI Generated? Or Not?",
#     allow_flagging="manual"
# ).launch()


# if __name__ == "__main__":
#     main()

# --- Active app ---
import gradio as gr
from transformers import pipeline

# Image-classification pipeline backed by the fine-tuned ViT checkpoint.
pipe = pipeline("image-classification", model="AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")

def predict(input_img):
    predictions = pipe(input_img)
    # Echo the input image back and map predictions to {label: score} for gr.Label.
    return input_img, {p["label"]: p["score"] for p in predictions}

gradio_app = gr.Interface(
    predict,
    inputs=gr.Image(label="Upload Image", sources=['upload', 'webcam'], type="pil"),
    outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
    title="AI Generated? Or Not?",
)

if __name__ == "__main__":
    gradio_app.launch()
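
A quick way to exercise the app once it is running is the gradio_client package that one of the commented-out lines above already mentions (pip install gradio_client). The snippet below is only a sketch: it assumes a recent gradio_client release (1.x, which provides handle_file), that the app is serving at the default local address, and that example.jpg is a placeholder image path.

from gradio_client import Client, handle_file

# Connect to the locally running Gradio app (default host and port assumed).
client = Client("http://127.0.0.1:7860/")

# gr.Interface exposes its function under the default endpoint name "/predict".
result = client.predict(handle_file("example.jpg"), api_name="/predict")

# The result mirrors the two outputs above: the processed image and the label scores.
print(result)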