sagar007 committed
Commit dfdcd97 · verified · 1 Parent(s): df96171

Create app.py

Files changed (1)
  1. app.py +45 -0
app.py ADDED
@@ -0,0 +1,45 @@
+ import gradio as gr
+ import torch
+ import numpy as np
+ from PIL import Image
+ from transformers import AutoProcessor, CLIPSegForImageSegmentation
+
+ # Load the CLIPSeg model and processor
+ processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
+ model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
+
+ def segment_image(input_image, text_prompt):
+     # Preprocess the image and the text prompt together
+     inputs = processor(text=[text_prompt], images=[input_image], padding="max_length", return_tensors="pt")
+
+     # Perform segmentation
+     with torch.no_grad():
+         outputs = model(**inputs)
+
+     # Turn the predicted logits into per-pixel probabilities
+     preds = outputs.logits.squeeze().sigmoid()
+
+     # Threshold at 0.5 and convert to a PIL mask (at the model's fixed 352x352 output resolution)
+     segmentation = (preds > 0.5).float()
+     segmentation_image = Image.fromarray((segmentation.numpy() * 255).astype(np.uint8))
+
+     return segmentation_image
+
+ # Create Gradio interface
+ iface = gr.Interface(
+     fn=segment_image,
+     inputs=[
+         gr.Image(type="pil", label="Input Image"),
+         gr.Textbox(label="Text Prompt", placeholder="Enter a description of what to segment...")
+     ],
+     outputs=gr.Image(type="pil", label="Segmentation Result"),
+     title="CLIPSeg Image Segmentation",
+     description="Upload an image and provide a text prompt to segment objects.",
+     examples=[
+         ["path/to/example_image1.jpg", "car"],  # placeholder paths: point these at real example images
+         ["path/to/example_image2.jpg", "person"],
+     ]
+ )
+
+ # Launch the interface
+ iface.launch()