vishalkatheriya
committed on
Update inference.py
Browse files- inference.py +4 -3
inference.py
CHANGED
@@ -1,6 +1,7 @@
|
|
1 |
import streamlit as st
|
2 |
def demo():
|
3 |
-
st.write("yes its work")
|
|
|
4 |
def run_example(image, model, processor, task_prompt, text_input=None):
|
5 |
|
6 |
inputs = processor(text=text_input, images=image, return_tensors="pt")
|
@@ -12,6 +13,6 @@ def run_example(image, model, processor, task_prompt, text_input=None):
|
|
12 |
)
|
13 |
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
|
14 |
print("generated_text:",generated_text)
|
15 |
-
parsed_answer = processor.post_process_generation(generated_text, task=task_prompt, image_size=(image.width, image.height))
|
16 |
-
return
|
17 |
|
|
|
1 |
import streamlit as st
|
2 |
def demo():
    """No-op placeholder for the demo entry point.

    The debug write below was deliberately disabled in this revision;
    it is kept (commented) for quick re-enabling while developing.
    """
    # st.write("yes its work")
    pass
|
5 |
def run_example(image, model, processor, task_prompt, text_input=None):
|
6 |
|
7 |
inputs = processor(text=text_input, images=image, return_tensors="pt")
|
|
|
13 |
)
|
14 |
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
|
15 |
print("generated_text:",generated_text)
|
16 |
+
# parsed_answer = processor.post_process_generation(generated_text, task=task_prompt, image_size=(image.width, image.height))
|
17 |
+
return generated_text
|
18 |
|