hassanelmghari committed on
Commit
00c5acf
1 Parent(s): 5fc2eb2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +74 -24
app.py CHANGED
@@ -19,19 +19,26 @@ def initialize_client(api_key=None):
19
  raise ValueError("Please provide a Together API Key")
20
 
21
  def encode_image(image_path):
22
- with Image.open(image_path) as img:
23
- buffered = io.BytesIO()
24
- img.save(buffered, format="PNG")
25
- return base64.b64encode(buffered.getvalue()).decode('utf-8')
 
 
 
 
26
 
27
  def bot_streaming(message, history, together_api_key, max_new_tokens=250, temperature=0.7):
 
28
  if history is None:
29
  history = []
 
 
30
  if client is None:
31
  try:
32
  initialize_client(together_api_key)
33
  except Exception as e:
34
- # Initialize history with error message
35
  history.append(["Error initializing client", str(e)])
36
  yield history
37
  return
@@ -40,10 +47,22 @@ def bot_streaming(message, history, together_api_key, max_new_tokens=250, temper
40
 
41
  messages = [{"role": "system", "content": prompt}]
42
 
43
- # Build the conversation history
44
- for user_msg, assistant_msg in history:
45
- messages.append({"role": "user", "content": [{"type": "text", "text": user_msg}]})
46
- messages.append({"role": "assistant", "content": [{"type": "text", "text": assistant_msg}]})
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
  # Prepare the current message
49
  content = []
@@ -51,37 +70,49 @@ def bot_streaming(message, history, together_api_key, max_new_tokens=250, temper
51
 
52
  try:
53
  if isinstance(message, dict):
 
54
  if 'text' in message and message['text']:
55
  user_text = message['text']
56
  content.append({"type": "text", "text": user_text})
 
 
57
  if 'files' in message and len(message['files']) > 0:
58
  file_info = message['files'][0]
59
- if isinstance(file_info, dict) and 'name' in file_info:
60
- image_path = file_info['name']
61
  elif isinstance(file_info, str):
62
  image_path = file_info
63
  else:
64
- raise ValueError("Invalid file information.")
65
 
 
66
  image_base64 = encode_image(image_path)
67
- content.append({"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}})
 
 
 
68
  user_text += "\n[User uploaded an image]"
69
  else:
 
70
  user_text = message
71
  content.append({"type": "text", "text": user_text})
72
  except Exception as e:
73
- # Update history before yielding
74
- history.append([user_text, f"An error occurred while processing your input: {str(e)}"])
 
 
75
  yield history
76
  return
77
 
78
- messages.append({"role": "user", "content": content})
79
-
80
- # Update the history with the new user message (with empty assistant response)
81
  history.append([user_text, ""])
82
- yield history
 
 
 
83
 
84
  try:
 
85
  stream = client.chat.completions.create(
86
  model="meta-llama/Llama-Vision-Free",
87
  messages=messages,
@@ -92,19 +123,38 @@ def bot_streaming(message, history, together_api_key, max_new_tokens=250, temper
92
 
93
  response = ""
94
  for chunk in stream:
95
- response += chunk.choices[0].delta.content or ""
96
- history[-1][1] = response
97
- yield history
 
 
 
 
 
 
 
 
98
 
99
  if not response:
 
100
  history[-1][1] = "No response generated. Please try again."
101
  yield history
102
 
103
  except Exception as e:
 
 
104
  if "Request Entity Too Large" in str(e):
105
- history[-1][1] = "The image is too large. Please try with a smaller image or compress the existing one."
 
 
 
 
 
 
 
106
  else:
107
- history[-1][1] = f"An error occurred: {str(e)}"
 
108
  yield history
109
 
110
  with gr.Blocks() as demo:
 
19
  raise ValueError("Please provide a Together API Key")
20
 
21
  def encode_image(image_path):
22
+ try:
23
+ with Image.open(image_path) as img:
24
+ buffered = io.BytesIO()
25
+ img.save(buffered, format="PNG")
26
+ return base64.b64encode(buffered.getvalue()).decode('utf-8')
27
+ except Exception as e:
28
+ print(f"Error encoding image: {e}")
29
+ raise e
30
 
31
  def bot_streaming(message, history, together_api_key, max_new_tokens=250, temperature=0.7):
32
+ # Initialize history if it's None
33
  if history is None:
34
  history = []
35
+
36
+ # Initialize the Together client if not already done
37
  if client is None:
38
  try:
39
  initialize_client(together_api_key)
40
  except Exception as e:
41
+ # Append error to history and yield
42
  history.append(["Error initializing client", str(e)])
43
  yield history
44
  return
 
47
 
48
  messages = [{"role": "system", "content": prompt}]
49
 
50
+ # Build the conversation history for the API
51
+ for idx, (user_msg, assistant_msg) in enumerate(history):
52
+ # Append user messages
53
+ messages.append({
54
+ "role": "user",
55
+ "content": [
56
+ {"type": "text", "text": user_msg}
57
+ ]
58
+ })
59
+ # Append assistant messages
60
+ messages.append({
61
+ "role": "assistant",
62
+ "content": [
63
+ {"type": "text", "text": assistant_msg}
64
+ ]
65
+ })
66
 
67
  # Prepare the current message
68
  content = []
 
70
 
71
  try:
72
  if isinstance(message, dict):
73
+ # Handle text input
74
  if 'text' in message and message['text']:
75
  user_text = message['text']
76
  content.append({"type": "text", "text": user_text})
77
+
78
+ # Handle image input
79
  if 'files' in message and len(message['files']) > 0:
80
  file_info = message['files'][0]
81
+ if isinstance(file_info, dict) and 'path' in file_info:
82
+ image_path = file_info['path']
83
  elif isinstance(file_info, str):
84
  image_path = file_info
85
  else:
86
+ raise ValueError("Invalid file information provided.")
87
 
88
+ # Encode the image to base64
89
  image_base64 = encode_image(image_path)
90
+ content.append({
91
+ "type": "image_url",
92
+ "image_url": {"url": f"data:image/png;base64,{image_base64}"}
93
+ })
94
  user_text += "\n[User uploaded an image]"
95
  else:
96
+ # If message is a string
97
  user_text = message
98
  content.append({"type": "text", "text": user_text})
99
  except Exception as e:
100
+ # If there's an error processing the input, append it to history and yield
101
+ error_message = f"An error occurred while processing your input: {str(e)}"
102
+ print(error_message) # Debug statement
103
+ history.append([user_text or "[Invalid input]", error_message])
104
  yield history
105
  return
106
 
107
+ # Append the new user message with an empty assistant response
 
 
108
  history.append([user_text, ""])
109
+ yield history # Yield the updated history to show the user's message immediately
110
+
111
+ # Append the current user message to the API messages
112
+ messages.append({"role": "user", "content": content})
113
 
114
  try:
115
+ # Call the Together AI API with streaming
116
  stream = client.chat.completions.create(
117
  model="meta-llama/Llama-Vision-Free",
118
  messages=messages,
 
123
 
124
  response = ""
125
  for chunk in stream:
126
+ # Extract the content from the API response
127
+ chunk_content = chunk.choices[0].delta.content or ""
128
+ response += chunk_content
129
+ # Update the last assistant message in history
130
+ if history:
131
+ history[-1][1] = response
132
+ yield history
133
+ else:
134
+ # If history is somehow empty, append the response
135
+ history.append(["", response])
136
+ yield history
137
 
138
  if not response:
139
+ # If no response was generated, notify the user
140
  history[-1][1] = "No response generated. Please try again."
141
  yield history
142
 
143
  except Exception as e:
144
+ # Handle exceptions from the API call
145
+ error_message = ""
146
  if "Request Entity Too Large" in str(e):
147
+ error_message = "The image is too large. Please try with a smaller image or compress the existing one."
148
+ else:
149
+ error_message = f"An error occurred: {str(e)}"
150
+
151
+ print(error_message) # Debug statement
152
+
153
+ if history:
154
+ history[-1][1] = error_message
155
  else:
156
+ history.append(["", error_message])
157
+
158
  yield history
159
 
160
  with gr.Blocks() as demo: