Update app.py
Browse files
app.py
CHANGED
@@ -1,17 +1,32 @@
|
|
1 |
from huggingface_hub import InferenceClient
|
2 |
import gradio as gr
|
|
|
|
|
|
|
|
|
|
|
3 |
|
4 |
-
|
5 |
-
|
6 |
-
)
|
7 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
|
9 |
def format_prompt(message, history):
|
10 |
-
prompt = "
|
11 |
for user_prompt, bot_response in history:
|
12 |
-
prompt += f"
|
13 |
-
prompt += f"
|
14 |
-
prompt += f"
|
15 |
return prompt
|
16 |
|
17 |
def generate(
|
@@ -98,7 +113,7 @@ gr.ChatInterface(
|
|
98 |
chatbot=gr.Chatbot(show_label=True, show_share_button=True, show_copy_button=True, likeable=True, layout="bubble", bubble_full_width=False),
|
99 |
additional_inputs=additional_inputs,
|
100 |
title="Hey Gemini",
|
101 |
-
description="Gemini Sprint submission by Rishiraj Acharya. Uses
|
102 |
theme="Soft",
|
103 |
examples=examples,
|
104 |
concurrency_limit=20,
|
|
|
1 |
from huggingface_hub import InferenceClient
|
2 |
import gradio as gr
|
3 |
+
import http.client
|
4 |
+
import typing
|
5 |
+
import urllib.request
|
6 |
+
import vertexai
|
7 |
+
from vertexai.generative_models import GenerativeModel, Image
|
8 |
|
9 |
+
# Vertex AI picks the project/location up from the environment
# (e.g. GOOGLE_CLOUD_PROJECT / application default credentials);
# uncomment and set project_id to pin it explicitly.
# vertexai.init(project=project_id)
# Multimodal Gemini model used by search() to describe images.
model = GenerativeModel("gemini-1.0-pro-vision")
# Hugging Face Inference API client for Gemma 7B Instruct chat
# completions (used by the generate() handler further down).
client = InferenceClient("google/gemma-7b-it")
|
12 |
|
13 |
+
def load_image_from_url(image_url: str, timeout: float = 30.0) -> Image:
    """Download an image over HTTP(S) and wrap it in a Vertex AI ``Image``.

    Args:
        image_url: URL of the image to fetch.
        timeout: Socket timeout in seconds for the request. The original
            code passed no timeout, so a stalled server could hang the
            Gradio worker indefinitely; the default keeps callers working
            unchanged while bounding the wait.

    Returns:
        An ``Image`` built from the downloaded bytes.

    Raises:
        urllib.error.URLError: on network failure or timeout.
    """
    with urllib.request.urlopen(image_url, timeout=timeout) as response:
        # urlopen returns http.client.HTTPResponse for http(s) URLs;
        # the cast only informs the type checker, it has no runtime effect.
        response = typing.cast(http.client.HTTPResponse, response)
        image_bytes = response.read()
    return Image.from_bytes(image_bytes)
|
18 |
+
|
19 |
+
def search(url):
    """Describe the image at *url* using the Gemini vision model.

    Downloads the image, asks the multimodal model what it shows, and
    returns the model's text answer.
    """
    img = load_image_from_url(url)
    reply = model.generate_content([img, "what is shown in this image?"])
    return reply.text
|
23 |
|
24 |
def format_prompt(message, history):
    """Render the chat *history* plus the new user *message* into Gemma's
    ``<start_of_turn>``/``<end_of_turn>`` chat-template string, ending with
    an open model turn for the completion to fill in.
    """
    turns = []
    for user_msg, model_msg in history:
        turns.append(f"<start_of_turn>user\n{user_msg}<end_of_turn>\n")
        turns.append(f"<start_of_turn>model\n{model_msg}<end_of_turn>\n")
    turns.append(
        f"<start_of_turn>user\n{message}<end_of_turn>\n<start_of_turn>model\n"
    )
    return "".join(turns)
|
31 |
|
32 |
def generate(
|
|
|
113 |
chatbot=gr.Chatbot(show_label=True, show_share_button=True, show_copy_button=True, likeable=True, layout="bubble", bubble_full_width=False),
|
114 |
additional_inputs=additional_inputs,
|
115 |
title="Hey Gemini",
|
116 |
+
description="Gemini Sprint submission by Rishiraj Acharya. Uses Google's Gemini 1.0 Pro Vision multimodal model from Vertex AI with Google's Gemma 7B Instruct model from Hugging Face.",
|
117 |
theme="Soft",
|
118 |
examples=examples,
|
119 |
concurrency_limit=20,
|