from PIL import Image
import gradio as gr
import spaces
import os
from huggingface_hub import hf_hub_download
import base64
import io
from llama_cpp import Llama
from llama_cpp.llama_chat_format import MoondreamChatHandler
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
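
# llama-cpp-python chat handlers expect the "image_url" value to be a string
# (an http(s) URL or a base64 data URI), not a PIL image. A minimal helper,
# assuming PNG encoding is acceptable for the uploaded images:
def pil_to_data_uri(image: Image.Image) -> str:
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return f"data:image/png;base64,{encoded}"
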
MODEL_LIST = ["openbmb/MiniCPM-Llama3-V-2_5","openbmb/MiniCPM-Llama3-V-2_5-int4"]
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Fall back to the first known model id so the page still renders if MODEL_ID is unset.
MODEL_ID = os.environ.get("MODEL_ID", MODEL_LIST[0])
MODEL_NAME = MODEL_ID.split("/")[-1]
TITLE = "<h1><center>VL-Chatbox</center></h1>"
DESCRIPTION = f'<h3><center>MODEL: <a href="https://hf.co/{MODEL_ID}">{MODEL_NAME}</a></center></h3>'
CSS = """
.duplicate-button {
margin: auto !important;
color: white !important;
background: black !important;
border-radius: 100vh !important;
}
"""
chat_handler = MoondreamChatHandler.from_pretrained(
repo_id="openbmb/MiniCPM-Llama3-V-2_5-gguf",
filename="*mmproj*",
)
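# Load the Q5_K_M-quantized GGUF weights and wire in the vision chat handler.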
llm = Llama.from_pretrained(
repo_id="openbmb/MiniCPM-Llama3-V-2_5-gguf",
filename="ggml-model-Q5_K_M.gguf",
chat_handler=chat_handler,
n_ctx=2048, # n_ctx should be increased to accommodate the image embedding
)
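# Optional sanity check (left commented out): a single-turn completion using the
# same message format stream_chat builds below. The image URL is illustrative.
# out = llm.create_chat_completion(messages=[{
#     "role": "user",
#     "content": [
#         {"type": "text", "text": "Describe this image."},
#         {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
#     ],
# }])
# print(out["choices"][0]["message"]["content"])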
@spaces.GPU(queue=False)
def stream_chat(message, history: list, temperature: float, max_new_tokens: int):
print(f'message is - {message}')
print(f'history is - {history}')
    messages = []
    if message["files"]:
        image = Image.open(message["files"][-1]).convert('RGB')
    elif len(history) == 0:
        raise gr.Error("Please upload an image first.")
    else:
        # Reuse the image uploaded in the first turn of the conversation.
        image = Image.open(history[0][0][0]).convert('RGB')
    image_url = pil_to_data_uri(image)
    # Replay the history; the image is re-attached to every user turn so the
    # chat handler can embed it from the message content.
    for prompt, answer in history:
        if isinstance(prompt, tuple):
            # File-upload turns appear as (filepath,) tuples; skip them, since
            # the image is already attached to the text turns.
            continue
        messages.extend([{
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {"type": "image_url", "image_url": {"url": image_url}}
            ]
        }, {
            "role": "assistant",
            "content": answer or ""
        }])
    # Append the current user turn, with the image attached.
    messages.append({
        "role": "user",
        "content": [
            {"type": "text", "text": message['text']},
            {"type": "image_url", "image_url": {"url": image_url}}
        ]
    })
    print(f"Messages is -\n{messages}")
    response = llm.create_chat_completion(
        messages=messages,
        temperature=temperature,
        max_tokens=max_new_tokens,
        stream=True,
    )
    # With stream=True the call returns a generator of OpenAI-style chunks;
    # accumulate the deltas and yield the growing text so the chat UI streams.
    buffer = ""
    for chunk in response:
        delta = chunk["choices"][0]["delta"]
        if delta.get("content"):
            buffer += delta["content"]
            yield buffer
chatbot = gr.Chatbot(height=450)
chat_input = gr.MultimodalTextbox(
interactive=True,
file_types=["image"],
placeholder="Enter message or upload file...",
show_label=False,
)
EXAMPLES = [
[{"text": "What is on the desk?", "files": ["./laptop.jpg"]}],
[{"text": "Where it is?", "files": ["./hotel.jpg"]}],
[{"text": "Can yo describe this image?", "files": ["./spacecat.png"]}]
]
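# The example image files above are assumed to ship alongside this script in
# the Space repository.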
with gr.Blocks(css=CSS) as demo:
gr.HTML(TITLE)
gr.HTML(DESCRIPTION)
gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button")
gr.ChatInterface(
fn=stream_chat,
multimodal=True,
textbox=chat_input,
chatbot=chatbot,
fill_height=True,
additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
additional_inputs=[
gr.Slider(
minimum=0,
maximum=1,
step=0.1,
value=0.8,
label="Temperature",
render=False,
),
gr.Slider(
minimum=128,
maximum=4096,
step=1,
value=1024,
label="Max new tokens",
render=False,
),
],
    )
    gr.Examples(EXAMPLES, [chat_input])
if __name__ == "__main__":
    demo.queue(api_open=False).launch(show_api=False, share=False)