Kadi-IAM committed
Commit c3e9bbf
1 Parent(s): 0447793

Update app.py

Files changed (1)
  1. app.py +185 -185
app.py CHANGED
@@ -1,185 +1,185 @@
The previous version of app.py differs from the updated one only in its final line:
- demo.launch(share=True)
+ demo.launch(share=False)

The updated app.py in full:
# This app is inspired by:
# https://huggingface.co/spaces/ysharma/Microsoft_Phi-3-Vision-128k
# and ref: https://www.analyticsvidhya.com/blog/2023/12/building-a-multimodal-chatbot-with-gemini-and-gradio/

import os
import base64

import gradio as gr
from mistralai import Mistral

api_key = os.environ["MISTRAL_API_KEY"]

PLACEHOLDER = """In the future, LISA will integrate a multimodal model that brings together language and vision capabilities for chatting with papers."""


def encode_image(image_path):
    """Encode the image to base64."""
    try:
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode("utf-8")
    except FileNotFoundError:
        print(f"Error: The file {image_path} was not found.")
        return None
    except Exception as e:  # Added general exception handling
        print(f"Error: {e}")
        return None


# def image_to_base64(image_path):
#     with open(image_path, "rb") as img:
#         encoded_string = base64.b64encode(img.read()).decode("utf-8")
#     return f"data:image/jpeg;base64,{encoded_string}"


def bot_streaming(message, history):
    print(f"message is - {message}")
    print(f"history is - {history}")
    if not message:
        raise gr.Error(
            "You need to upload an image for the vision model to work. Close the error and try again with an image."
        )
    if message["files"]:
        # message["files"][-1] is either a dict or a plain path string
        if isinstance(message["files"][-1], dict):
            image = message["files"][-1]["path"]
        else:
            image = message["files"][-1]
    else:
        # If there's no image uploaded for this turn, look for images in the past
        # turns (kept inside tuples) and take the last one.
        for hist in history:
            if isinstance(hist[0], tuple):
                image = hist[0][0]
    try:
        if image is None:
            # Handle the case where image is None
            raise gr.Error(
                "You need to upload an image for the vision model to work. Close the error and try again with an image."
            )
    except NameError:
        # Handle the case where 'image' is not defined at all
        raise gr.Error(
            "You need to upload an image for the vision model to work. Close the error and try again with an image."
        )

    conversation = []
    flag = False
    for user, assistant in history:
        if assistant is None:
            flag = True
            conversation.extend([{"role": "user", "content": ""}])
            continue
        if flag:
            conversation[0]["content"] = f"<|image_1|>\n{user}"
            conversation.extend([{"role": "assistant", "content": assistant}])
            flag = False
            continue
        conversation.extend(
            [
                {"role": "user", "content": user},
                {"role": "assistant", "content": assistant},
            ]
        )

    if len(history) == 0:
        conversation.append(
            {"role": "user", "content": f"<|image_1|>\n{message['text']}"}
        )
    else:
        conversation.append({"role": "user", "content": message["text"]})
    print(f"prompt is -\n{conversation}")
    base64_image = encode_image(image)

    # Specify model
    model = "pixtral-12b-2409"

    # Initialize the Mistral client
    client = Mistral(api_key=api_key)
    # inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")

    # Generate a response from the model.
    # NOTE: the `conversation` list assembled above is only printed for debugging;
    # the request below sends a fixed question together with the latest image.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this image?"},
                {
                    "type": "image_url",
                    "image_url": f"data:image/jpeg;base64,{base64_image}",
                },
            ],
        }
    ]

    # Stream, ref.: https://github.com/mistralai/client-python/blob/main/examples/chatbot_with_streaming.py
    stream_response = client.chat.stream(model=model, messages=messages)

    answer = ""
    for chunk in stream_response:
        response = chunk.data.choices[0].delta.content
        if response is not None:
            # print(response, end="", flush=True)
            answer += response
            yield answer

    # Bulk (non-streaming) inference:
    # chat_response = client.chat.complete(
    #     model=model,
    #     messages=messages,
    # )
    # Print the content of the response
    # print(chat_response.choices[0].message.content)
    # result = chat_response.choices[0].message.content
    # return result

    # Unused leftovers from the transformers-based reference implementation:
    # streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": True, "skip_prompt": True, "clean_up_tokenization_spaces": False})
    # generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024, do_sample=False, temperature=0.0, eos_token_id=processor.tokenizer.eos_token_id)

    # thread = Thread(target=model.generate, kwargs=generation_kwargs)
    # thread.start()

    # for new_text in streamer:
    #     buffer += new_text
    #     yield buffer


chatbot = gr.Chatbot(scale=1, placeholder=PLACEHOLDER)
chat_input = gr.MultimodalTextbox(
    interactive=True,
    file_types=["image"],
    placeholder="Enter message or upload figure...",
    show_label=False,
)

with gr.Blocks(
    fill_height=True,
) as demo:
    gr.ChatInterface(
        fn=bot_streaming,
        title="LISA-Vision-test",
        examples=[
            {"text": "What does this figure describe?", "files": ["./sample1.png"]},
            {
                "text": "OCR the table in the figure and put it in Markdown format",
                "files": ["./sample2.png"],
            },
            {
                "text": "Explain this XRD figure to me in detail.",
                "files": ["./sample3.png"],
            },
        ],
        description="Try a VLM (Vision Language Model) to chat with figures. Upload an image and start chatting, or just try one of the examples below. If you don't upload an image, you'll get an error.",
        stop_btn="Stop Generation",
        multimodal=True,
        textbox=chat_input,
        chatbot=chatbot,
        cache_examples=False,
        examples_per_page=3,
    )

demo.queue(api_open=False)
demo.launch(share=False)
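For reference, a minimal smoke test of the streaming handler (not part of this commit): it drives the bot_streaming generator without the Gradio UI, assuming MISTRAL_API_KEY is exported and ./sample1.png sits next to app.py.

# Hypothetical check, not part of app.py: paste temporarily before demo.launch(),
# or run in a session where bot_streaming is already defined.
# Assumes MISTRAL_API_KEY is set and ./sample1.png exists.
message = {"text": "What does this figure describe?", "files": ["./sample1.png"]}
answer = ""
for partial in bot_streaming(message, history=[]):
    answer = partial  # each yield is the accumulated answer so far
print(answer)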