ArthurZ committed
Commit aec582a
1 parent: a7c34ea

Update README.md (#1)

- Update README.md (3b5939ceab4e75f5f40b9092544f96e06f4b1c80)

Files changed (1)
  1. README.md +3 -3
README.md CHANGED
@@ -51,7 +51,7 @@ import requests
 from transformers import SamModel, SamProcessor
 
 model = SamModel.from_pretrained("facebook/sam-vit-base")
-processsor = SamProcessor.from_pretrained("facebook/sam-vit-base")
+processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
 
 img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
 raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
@@ -60,7 +60,7 @@ input_points = [[[450, 600]]] # 2D localization of a window
 
 
 ```python
-inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(device)
+inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to("cuda")
 outputs = model(**inputs)
 masks = processor.image_processor.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu())
 scores = outputs.iou_scores
@@ -76,7 +76,7 @@ which are all fed to the model.
 The pipeline is made for automatic mask generation. The following snippet demonstrates how easy you can run it (on any device! Simply feed the appropriate `points_per_batch` argument)
 ```python
 from transformers import pipeline
-generator = pipeline("automatic-mask-generation", device = 0, points_per_batch = 256)
+generator = pipeline("mask-generation", device = 0, points_per_batch = 256)
 image_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
 outputs = generator(image_url, points_per_batch = 256)
 ```
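For reference, here is the point-prompt example with the two patched lines (the `processor` name and the `.to("cuda")` call) stitched back into the surrounding README code. This is a reconstructed sketch, not part of the commit: the `torch` import, the CPU fallback, moving the model to the device, and the `no_grad()` wrapper are additions so the snippet runs as-is.

```python
# Reconstruction of the corrected README example (not part of the commit).
import requests
import torch
from PIL import Image
from transformers import SamModel, SamProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"

model = SamModel.from_pretrained("facebook/sam-vit-base").to(device)
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")  # name fixed by this commit

img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
input_points = [[[450, 600]]]  # 2D localization of a window

# The commit hard-codes .to("cuda"); using `device` here keeps the sketch runnable on CPU too.
inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(**inputs)

masks = processor.image_processor.post_process_masks(
    outputs.pred_masks.cpu(),
    inputs["original_sizes"].cpu(),
    inputs["reshaped_input_sizes"].cpu(),
)
scores = outputs.iou_scores
```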
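The third change swaps the pipeline task from `"automatic-mask-generation"` to `"mask-generation"`, the identifier `transformers` registers for SAM's mask-generation pipeline. A minimal sketch of the corrected call follows; passing the model explicitly and reading `outputs["masks"]` are my assumptions, not part of the README diff.

```python
# Sketch of the corrected pipeline call; output handling is an assumption.
from transformers import pipeline

# device=0 assumes a GPU is available, as in the README snippet.
generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=0, points_per_batch=256)

image_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
outputs = generator(image_url, points_per_batch=256)

print(f"generated {len(outputs['masks'])} masks")
```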