Madhuri123 committed on
Commit
a8abd47
1 Parent(s): 05e38b4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -0
app.py CHANGED
@@ -5,6 +5,7 @@ from PIL import Image
5
  from transformers import MllamaForConditionalGeneration, AutoProcessor
6
  from huggingface_hub import login
7
  import io
 
8
  # Authenticate with Hugging Face
9
  HF_TOKEN = st.secrets["newfinegrained"]
10
  login(HF_TOKEN)
@@ -22,9 +23,16 @@ def load_model_and_processor(model_id):
22
  def generate_text(model, processor, image_url, prompt):
23
  """Generate text using the model and processor."""
24
  try:
 
 
 
25
  image = Image.open(io.BytesIO(response.content))
 
 
26
  inputs = processor(image, prompt, return_tensors="pt").to(model.device)
27
  output = model.generate(**inputs, max_new_tokens=30)
 
 
28
  return processor.decode(output[0])
29
  except Exception as e:
30
  return f"Error: {e}"
@@ -64,6 +72,7 @@ if st.button("Generate Haiku"):
64
 
65
 
66
 
 
67
 
68
 
69
 
 
5
  from transformers import MllamaForConditionalGeneration, AutoProcessor
6
  from huggingface_hub import login
7
  import io
8
+
9
  # Authenticate with Hugging Face
10
  HF_TOKEN = st.secrets["newfinegrained"]
11
  login(HF_TOKEN)
 
23
  def generate_text(model, processor, image_url, prompt):
24
  """Generate text using the model and processor."""
25
  try:
26
+ # Fetch the image from the URL
27
+ response = requests.get(image_url)
28
+ response.raise_for_status() # Raise an error if the request fails
29
  image = Image.open(io.BytesIO(response.content))
30
+
31
+ # Process the image and prompt
32
  inputs = processor(image, prompt, return_tensors="pt").to(model.device)
33
  output = model.generate(**inputs, max_new_tokens=30)
34
+
35
+ # Decode the output
36
  return processor.decode(output[0])
37
  except Exception as e:
38
  return f"Error: {e}"
 
72
 
73
 
74
 
75
+
76
 
77
 
78