gpu support
Browse files- handler.py +1 -1
- test_handler_locally.py +16 -0
handler.py
CHANGED
@@ -11,7 +11,7 @@ class EndpointHandler:
|
|
11 |
self.processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
|
12 |
|
13 |
|
14 |
-
device = '
|
15 |
|
16 |
model = LlavaNextForConditionalGeneration.from_pretrained(
|
17 |
"llava-hf/llava-v1.6-mistral-7b-hf",
|
|
|
11 |
self.processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
|
12 |
|
13 |
|
14 |
+
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
15 |
|
16 |
model = LlavaNextForConditionalGeneration.from_pretrained(
|
17 |
"llava-hf/llava-v1.6-mistral-7b-hf",
|
test_handler_locally.py
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Smoke-test the inference EndpointHandler locally.

Builds a sample payload (a prompt plus one remote image reference) and runs it
through the handler, then prints the result so the run is observable.
NOTE(review): requires network access to fetch the image URL and the model
weights pulled in by EndpointHandler — this is an integration script, not a
unit test.
"""

from handler import EndpointHandler

# init handler; "." assumes handler.py lives next to this script
my_handler = EndpointHandler(path=".")

# prepare sample payload
prompt = "Can you describe this picture focusing on specifics visual artifacts and ambiance (objects, colors, person, athmosphere..). Please stay concise only output keywords and concepts detected."
files = [
    {"path": "https://media.rolex.com/image/upload/q_auto/f_auto/c_limit,w_2440/v1708384234/rolexcom/about-rolex/hub/about-rolex-hub-cover-aca202310cw-0002-portrait"}
]

# test the handler
results = my_handler({
    'prompt': prompt,
    'files': files
})

# Fix: the original script discarded the result — emit it so a local run
# actually shows what the model returned.
print(results)