Lin committed on
Commit b85384a
1 Parent(s): f8c3dcc

Updated app

Files changed (1)
  1. app.py +33 -11
app.py CHANGED
@@ -1,15 +1,37 @@
  import numpy as np
  import gradio as gr

- def sepia(input_img):
-     sepia_filter = np.array([
-         [0.393, 0.769, 0.189],
-         [0.349, 0.686, 0.168],
-         [0.272, 0.534, 0.131]
-     ])
-     sepia_img = input_img.dot(sepia_filter.T)
-     sepia_img /= sepia_img.max()
-     return sepia_img
-
- demo = gr.Interface(sepia, gr.Image(), "image")
  demo.launch()
 
  import numpy as np
  import gradio as gr
+ import torch
+
+ from transformers import pipeline
+
+ model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
+
+ # def sepia(input_img):
+ #     sepia_filter = np.array([
+ #         [0.393, 0.769, 0.189],
+ #         [0.349, 0.686, 0.168],
+ #         [0.272, 0.534, 0.131]
+ #     ])
+ #     sepia_img = input_img.dot(sepia_filter.T)
+ #     sepia_img /= sepia_img.max()
+ #     return sepia_img
+
+ # demo = gr.Interface(sepia, gr.Image(), "image")
+
+ # Download human-readable labels for ImageNet.
+ response = requests.get("https://git.io/JJkYN")
+ labels = response.text.split("\n")
+
+ def predict(inp):
+     inp = transforms.ToTensor()(inp).unsqueeze(0)
+     with torch.no_grad():
+         prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
+     confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
+     return confidences
+
+ gr.Interface(fn=predict,
+              inputs=gr.Image(type="pil"),
+              outputs=gr.Label(num_top_classes=3),
+              examples=["lion.jpg", "cheetah.jpg"]).launch()
+
  demo.launch()
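
Note that, as committed, the new app.py cannot run as-is: requests and torchvision.transforms are used but never imported, the transformers pipeline import is unused, and the trailing demo.launch() refers to a demo variable that only exists in the commented-out sepia code. Below is a minimal corrected sketch of the same classifier; it assumes the example files lion.jpg and cheetah.jpg are present in the repository.

import requests
import torch
import gradio as gr
from torchvision import transforms

# Pretrained ResNet-18 from torch.hub, switched to eval mode for inference.
model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()

# Human-readable ImageNet class labels, one label per line.
response = requests.get("https://git.io/JJkYN")
labels = response.text.split("\n")

def predict(inp):
    # PIL image -> (1, 3, H, W) float tensor in [0, 1].
    inp = transforms.ToTensor()(inp).unsqueeze(0)
    with torch.no_grad():
        prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
    # Map each of the 1000 ImageNet classes to its predicted probability.
    return {labels[i]: float(prediction[i]) for i in range(1000)}

demo = gr.Interface(fn=predict,
                    inputs=gr.Image(type="pil"),
                    outputs=gr.Label(num_top_classes=3),
                    examples=["lion.jpg", "cheetah.jpg"])
demo.launch()

gr.Label accepts the full {label: probability} dictionary returned by predict and renders only the num_top_classes entries with the highest confidence, so returning all 1000 classes is fine.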