import os
from google.oauth2 import service_account
from cryptography.fernet import Fernet
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from vertexai.preview.vision_models import Image, ImageQnAModel
import vertexai
import gradio as gr
PROJECT_ID = "franz-media-1512554302520"
LOCATION = "us-central1"
CRED_PATH = "creds.json"
with open("key.json","w") as f:
encrypted_data = f.read()
cipher_suite = Fernet(os.environ["ENCRYPTION_KEY"])
decrypted_data = cipher_suite.decrypt(encrypted_data)
with open(CRED_PATH,"wb") as f:
f.write(decrypted_data)
print("stored")
credentials = service_account.Credentials.from_service_account_file(CRED_PATH)
vertexai.init(project=PROJECT_ID, location=LOCATION,credentials=credentials)
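# imagetext@001 is Vertex AI's pretrained image captioning / visual Q&A model.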
image_qna_model = ImageQnAModel.from_pretrained("imagetext@001")
template = """You are a super smart and charming GPT living inside of a plant, every day you get a text with your status. Your task then is to write a flirty message to your owner.
Status Data:
{question}
Let's think step by step.
Flirty message:
"""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = ChatOpenAI(model="gpt-4")
llm_chain = LLMChain(prompt=prompt, llm=llm)
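# Example invocation (hypothetical status string):
#   llm_chain.run("Plant detected: a monstera\nHydration level detected: DRY\n")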
def detect_question(image_path, question):
# Ask a question about the image
image = Image.load_from_file(image_path)
return image_qna_model.ask_question(image=image, question=question)[0]
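# Example usage (hypothetical local file path):
#   detect_question("plant.jpg", "What kind of plant is this?")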
# Gradio chatbot demo with multimodal input (text and image upload) and streaming bot responses.
local_history = []  # initial (empty) chat history for the Chatbot component
global_cache = {}  # debug scratchpad populated by bot() and change_fn()
def add_text(history, text):
    # Append the user's text message and disable the textbox until the bot replies.
    history = history + [(text, None)]
    return history, gr.Textbox(value="", interactive=False)
def add_file(history, file):
    # Append the uploaded file; Gradio renders a (filepath,) tuple as media.
    history = history + [((file.name,), None)]
    return history
def bot(history):
    global global_cache
    last_msg = history[-1][0]  # latest user message: text, or a (filepath,) tuple for uploads
    if isinstance(last_msg, tuple):
        last_msg = last_msg[0]
    history[-1][1] = ""
    global_cache["history"] = history
    global_cache["last_msg"] = last_msg
    # If the last message is a path to an existing file, treat it as a plant photo.
if os.path.exists(last_msg):
history[-1][1] += "Detecting image..."
yield history
answer = detect_question(
last_msg,
"Your task is to save the main plant, classify what kind of plant it is:",
)
history[-1][1] = f"Plant detected: {answer}\n"
yield history
answer = detect_question(
last_msg,
"Where is orange indicator on the moist level on the soil hydrometer? DRY, MOIST or WET?",
)
history[-1][1] += f"Hydration level detected: {answer}\n"
yield history
answer = detect_question(
last_msg,
"Your task is to save the main plant, does it have a visible disease:",
)
history[-1][1] += f"Disease detected: {answer}\n"
yield history
        status = history[-1][1]
        chat = llm_chain.run(status)
        history.append((None, chat))  # show the flirty message as a new bot turn
        yield history
    else:
        # Plain text message: no image to analyze.
        history[-1][1] = "Thinking..."
        yield history
def change_fn(*args, **kwargs):
    # Unused debug helper: stashes event args in the shared cache for inspection.
    global_cache["args"] = args
with gr.Blocks() as demo:
chatbot = gr.Chatbot(
local_history,
elem_id="chatbot",
bubble_full_width=False,
)
with gr.Row():
txt = gr.Textbox(
scale=4,
show_label=False,
placeholder="Enter text and press enter, or upload an image",
container=False,
)
btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
    # On submit: record the text, stream the bot's reply, then re-enable the textbox.
    txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
        bot, chatbot, chatbot, api_name="bot_response"
    )
    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
    # On upload: record the file, then stream the bot's image analysis.
    file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
demo.queue()  # queueing lets the generator-based bot() stream partial updates
demo.launch(auth=("admin", os.environ["DEMO_KEY"]))
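# To run (the filename app.py is an assumption), set both secrets first, e.g.:
#   export ENCRYPTION_KEY=<fernet key> DEMO_KEY=<demo password>
#   python app.py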