Madhuri123 committed on
Commit
fb3dfe7
·
verified ·
1 Parent(s): 9366733

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -21
app.py CHANGED
@@ -17,15 +17,15 @@ def load_model_and_processor(model_id):
17
  processor = AutoProcessor.from_pretrained(model_id)
18
  return model, processor
19
 
20
def generate_text(model, processor, image_url, prompt):
    """Generate text from an image URL and a prompt.

    Args:
        model: Loaded vision-language model exposing ``generate`` and ``device``.
        processor: Matching processor that packs image + text into tensors.
        image_url: HTTP(S) URL of the input image.
        prompt: Text prompt (may embed model-specific special tokens).

    Returns:
        The decoded generation, or an ``"Error: ..."`` string on any failure.
    """
    try:
        # Timeout + status check: the original call had neither, so a stalled
        # server could hang the app and an HTML error page could be handed to
        # Image.open as if it were image bytes.
        response = requests.get(image_url, stream=True, timeout=30)
        response.raise_for_status()
        image = Image.open(response.raw)
        inputs = processor(image, prompt, return_tensors="pt").to(model.device)
        output = model.generate(**inputs, max_new_tokens=30)
        return processor.decode(output[0])
    except Exception as e:
        # Broad catch is deliberate: the Streamlit UI shows the message
        # instead of crashing the whole app.
        return f"Error: {e}"
29
 
30
# --- Streamlit UI ---
st.title("LLaMA 3 Vision Haiku Generator")

# Load the vision model and its processor once at startup.
MODEL_ID = "meta-llama/Llama-3.2-11B-Vision"
model, processor = load_model_and_processor(MODEL_ID)

# Inputs: an image URL plus a prompt pre-seeded with the model's
# special tokens for image-conditioned generation.
image_url = st.text_input(
    "Enter the Image URL:",
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg",
)
prompt = st.text_area(
    "Enter your prompt:",
    "<|image|><|begin_of_text|>If I had to write a haiku for this one",
)

if st.button("Generate Haiku"):
    with st.spinner("Generating haiku..."):
        generated = generate_text(model, processor, image_url, prompt)

    st.subheader("Generated Text")
    st.write(generated)

    # Echo the source image; fall back to an error banner on a bad URL.
    try:
        st.image(image_url, caption="Input Image")
    except Exception:
        st.error("Failed to load image. Please check the URL.")
53
 
54
 
55
 
 
17
  processor = AutoProcessor.from_pretrained(model_id)
18
  return model, processor
19
 
20
def generate_text(model, processor, image_url, prompt):
    """Generate text for an image URL and prompt; return the decoded output
    or an ``"Error: ..."`` string on failure."""
    try:
        image = Image.open(requests.get(image_url, stream=True).raw)
        inputs = processor(image, prompt, return_tensors="pt").to(model.device)
        output = model.generate(**inputs, max_new_tokens=30)
        return processor.decode(output[0])
    except Exception as e:
        # Surface the failure in the UI instead of crashing the app.
        return f"Error: {e}"

# Streamlit App
st.title("LLaMA 3 Vision Haiku Generator")

# Model ID and loading
MODEL_ID = "meta-llama/Llama-3.2-11B-Vision"
model, processor = load_model_and_processor(MODEL_ID)

# NOTE(review): this revision had commented out generate_text and the whole
# UI below and added a stray `print(model)` debug statement, leaving the app
# non-functional. Restored the working code and dropped the debug print.

# User input for image URL and prompt
image_url = st.text_input("Enter the Image URL:", "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg")

prompt = st.text_area("Enter your prompt:", "<|image|><|begin_of_text|>If I had to write a haiku for this one")

if st.button("Generate Haiku"):
    with st.spinner("Generating haiku..."):
        result = generate_text(model, processor, image_url, prompt)

    st.subheader("Generated Text")
    st.write(result)

    try:
        st.image(image_url, caption="Input Image")
    except Exception:
        st.error("Failed to load image. Please check the URL.")