# plantgpt / app.py
# franz101's picture
# Update app.py
# c0a6b99
import os
import json
from google.oauth2 import service_account
from cryptography.fernet import Fernet
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from vertexai.preview.vision_models import Image
from vertexai.preview.vision_models import ImageQnAModel
import vertexai
PROJECT_ID = "franz-media-1512554302520"
LOCATION = "us-central1"
CRED_PATH = "creds.json"

# Decrypt the Fernet-encrypted service-account key bundled with the Space.
# ENCRYPTION_KEY must be set in the environment (e.g. as a Space secret).
with open("/home/user/app/key.json", "rb") as f:
    encrypted_data = f.read()
cipher_suite = Fernet(os.environ["ENCRYPTION_KEY"])
decrypted_data = cipher_suite.decrypt(encrypted_data)

# Write the decrypted credentials with owner-only permissions (0o600) so
# other processes/users on the host cannot read the service-account key.
fd = os.open(CRED_PATH, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
with os.fdopen(fd, "wb") as f:
    f.write(decrypted_data)
print("stored")

# Authenticate against Google Cloud and initialize the Vertex AI SDK,
# then load the image question-answering model used by detect_question().
credentials = service_account.Credentials.from_service_account_file(CRED_PATH)
vertexai.init(project=PROJECT_ID, location=LOCATION, credentials=credentials)
image_qna_model = ImageQnAModel.from_pretrained("imagetext@001")
# Prompt used to turn a multi-line plant-status report into the chat reply.
template = """You are a super smart and charming GPT living inside of a plant, every day you get a text with your status. Your task then is to write a flirty message to your owner.
Status Data:
{question}
Let's think step by step.
Flirty message:
"""
# Single-input prompt: {question} receives the status text built by bot().
prompt = PromptTemplate(template=template, input_variables=["question"])
# NOTE(review): ChatOpenAI reads OPENAI_API_KEY from the environment —
# confirm the secret is configured on the host.
llm = ChatOpenAI(model="gpt-4")
llm_chain = LLMChain(prompt=prompt, llm=llm)
def detect_question(image_path, question):
    """Run visual Q&A on the image at *image_path* and return the first answer.

    Loads the image from disk, queries the Vertex AI ImageQnA model with
    *question*, and returns the top-ranked answer string.
    """
    loaded_image = Image.load_from_file(image_path)
    answers = image_qna_model.ask_question(image=loaded_image, question=question)
    return answers[0]
import gradio as gr
import os
import time
# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.
local_history = []  # initial (empty) chat history handed to gr.Chatbot
global_cache = {}  # debug scratch space written by bot() and change_fn()
def add_text(history, text):
    """Append the user's text message to the chat history.

    Returns the extended history plus a cleared, disabled textbox; the
    textbox is re-enabled by the follow-up handler after bot() finishes.
    """
    # BUGFIX: removed `global global_history, global_message` — those names
    # are never defined or assigned anywhere, so the declaration was dead
    # and misleading.
    history = history + [(text, None)]
    return history, gr.Textbox(value="", interactive=False)
def add_file(history, file):
    """Append an uploaded file to the chat history.

    The file is stored as a one-element tuple (filepath,), which is how
    Gradio's Chatbot represents media messages.
    """
    entry = ((file.name,), None)
    return history + [entry]
def bot(history):
    """Stream the bot's reply into the last chat-history entry.

    If the newest user message is a path to an existing file (an uploaded
    image), three visual Q&A passes are run against it (plant species,
    soil hydration, visible disease); the accumulated status text is then
    fed through the LLM chain to produce the final chat message. Plain
    text messages receive a placeholder reply.

    Yields the updated history after each step so Gradio can stream.
    """
    global global_cache
    # Newest user message; uploads arrive as a one-element tuple (filepath,).
    last_msg = history[-1][0]  # BUGFIX: was history[-1][-0]; -0 is just 0
    if isinstance(last_msg, tuple):
        last_msg = last_msg[0]
    # NOTE(review): in-place assignment assumes Gradio delivers history
    # entries as mutable lists — confirm across gradio versions.
    history[-1][1] = ""
    global_cache["history"] = history
    global_cache["last_msg"] = last_msg
    if os.path.exists(last_msg):
        history[-1][1] += "Detecting image..."
        yield history
        answer = detect_question(
            last_msg,
            "Your task is to save the main plant, classify what kind of plant it is:",
        )
        history[-1][1] = f"Plant detected: {answer}\n"
        yield history
        answer = detect_question(
            last_msg,
            "Where is orange indicator on the moist level on the soil hydrometer? DRY, MOIST or WET?",
        )
        history[-1][1] += f"Hydration level detected: {answer}\n"
        yield history
        answer = detect_question(
            last_msg,
            "Your task is to save the main plant, does it have a visible disease:",
        )
        history[-1][1] += f"Disease detected: {answer}\n"
        yield history
        # Feed the accumulated status report to the LLM for the reply.
        status = history[-1][1]
        chat = llm_chain.run(status)
        history.append((chat, None))
        yield history
    else:
        history[-1][1] = "Thinking..."
        # BUGFIX: the original branch never yielded, so the generator ended
        # with no visible response for text-only input.
        yield history
def change_fn(*args, **kwargs):
    """Debug hook: stash the raw callback positional args in the module cache."""
    global_cache["args"] = args
# Gradio UI: a chatbot pane over a text input + upload button.
with gr.Blocks() as demo:
    # Chat display seeded with the (empty) module-level history.
    chatbot = gr.Chatbot(
        local_history,
        elem_id="chatbot",
        bubble_full_width=False,
    )
    with gr.Row():
        txt = gr.Textbox(
            scale=4,
            show_label=False,
            placeholder="Enter text and press enter, or upload an image",
            container=False,
        )
        btn = gr.UploadButton("πŸ“", file_types=["image", "video", "audio"])
    # Text flow: append the message, then stream bot()'s reply into the chat.
    txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
        bot, chatbot, chatbot, api_name="bot_response"
    )
    # Re-enable the textbox (disabled by add_text) once the reply is done.
    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
    # Upload flow: append the file entry, then run the image pipeline in bot().
    file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
# Basic-auth-protected launch; the password comes from the DEMO_KEY secret.
demo.launch(auth=("admin", os.environ["DEMO_KEY"]))