|
import base64
import io
import os
import sys

import gradio as gr
from PIL import Image

sys.path.append("./ctm")

from ctm.ctms.ctm_base import BaseConsciousnessTuringMachine
|
|
|
# Single module-level CTM instance shared by every Gradio callback below.
ctm = BaseConsciousnessTuringMachine()

# Attach the supervisor that answers queries from the winning gist.
# NOTE(review): name suggests a GPT-4 backend — defined in the ctm package.
ctm.add_supervisor("gpt4_supervisor")

# Launch-mode flag read by start_demo(): "true" (the default) selects the
# hosted-deployment launch options; any other value selects the local
# development server bound to 0.0.0.0.
DEPLOYED = os.getenv("DEPLOYED", "true").lower() == "true"
|
|
|
|
|
def convert_base64(image_array):
    """Encode an image array as a base64 PNG string.

    Fix: `Image` (PIL), `io`, and `base64` were used here without ever
    being imported anywhere in the file, so the first call raised
    NameError; the missing imports are now added at the top of the file.

    Args:
        image_array: array-like image accepted by ``PIL.Image.fromarray``
            (e.g. the HxWx3 uint8 array a ``gr.Image`` component yields —
            TODO confirm against callers).

    Returns:
        str: the PNG-encoded image as a utf-8 base64 string.
    """
    image = Image.fromarray(image_array)
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    byte_data = buffer.getvalue()
    return base64.b64encode(byte_data).decode("utf-8")
|
|
|
|
|
def introduction():
    """Render the banner image column and the demo-title Markdown column.

    Intended to be called inside a ``gr.Row()`` context (see ``main``);
    the two columns split the row 2:5.
    """
    with gr.Column(scale=2):
        gr.Image("images/CTM-AI.png", elem_id="banner-image", show_label=False)
    with gr.Column(scale=5):
        gr.Markdown(
            """Consciousness Turing Machine Demo
            """
        )
|
|
|
|
|
def add_processor(processor_name, display_name, state):
    """Register ``processor_name`` with the global CTM instance.

    Wired as a Gradio click handler (see ``processor_tab``); the hidden
    textboxes supply the processor's internal name and display label.

    Args:
        processor_name: internal processor identifier to register.
        display_name: human-readable label shown on the button.
        state: Gradio session state (currently unused by this handler).

    Returns:
        str: the label suffixed with " (added)", used to relabel the button.
    """
    # Debug traces of the registration and resulting processor bookkeeping.
    print("add processor ", processor_name)
    ctm.add_processor(processor_name)
    print(ctm.processor_group_map)
    print(len(ctm.processor_list))
    return f"{display_name} (added)"
|
|
|
|
|
def _render_processor_buttons(processor_names):
    """Render one registration button per processor name.

    For each name: a button labelled with a prettified form of the name,
    plus two hidden Textboxes carrying the internal name and the label to
    the ``add_processor`` click handler (which relabels the button).
    Must be called inside an open Gradio layout context.
    """
    for model_name in processor_names:
        # e.g. "gpt4_text_emotion_processor" -> "Gpt4 Text Emotion "
        # (trailing space is preserved from the original label logic).
        label = (
            model_name.replace("processor", "").replace("_", " ").title()
        )
        button = gr.Button(label)
        name_box = gr.Textbox(value=model_name, visible=False)
        label_box = gr.Textbox(value=label, visible=False)
        button.click(
            fn=add_processor,
            inputs=[name_box, label_box, gr.State()],
            outputs=[button],
        )


def processor_tab():
    """Render the processor-selection UI.

    Two equal columns — text processors on the left, vision processors on
    the right — each a heading plus one add-button per processor.

    Fix: the two columns previously duplicated the same 20-line rendering
    loop (which also confusingly rebound ``display_name`` from str to
    ``gr.Textbox``); the loop now lives in ``_render_processor_buttons``.
    """
    text_processors = [
        "gpt4_text_emotion_processor",
        "gpt4_text_summary_processor",
        "gpt4_speaker_intent_processor",
        "roberta_text_sentiment_processor",
    ]
    vision_processors = [
        "gpt4v_cloth_fashion_processor",
        "gpt4v_face_emotion_processor",
        "gpt4v_ocr_processor",
        "gpt4v_posture_processor",
        "gpt4v_scene_location_processor",
    ]

    with gr.Blocks():
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### Text Processors")
                _render_processor_buttons(text_processors)
            with gr.Column(scale=1):
                gr.Markdown("### Vision Processors")
                _render_processor_buttons(vision_processors)
|
|
|
|
|
def forward(query, content, image, state):
    """Run one full CTM forward pass and report each stage's output.

    Stages: ask all processors, run the up-tree competition, ask the
    supervisor; then broadcast the winner down-tree and update processor
    links. ``state`` is threaded through and mutated by each stage.

    Returns:
        tuple: (processors info, competition info, supervisor info, state).
    """
    state["question"] = query

    processors_info, state = ask_processors(query, content, image, state)
    competition_info, state = uptree_competition(state)
    supervisor_info, state = ask_supervisor(state)

    # Feedback phase: propagate the winning gist and refresh links.
    ctm.downtree_broadcast(state["winning_output"])
    ctm.link_form(state["processor_output"])

    return processors_info, competition_info, supervisor_info, state
|
|
|
|
|
def ask_processors(query, text, image, state):
    """Query every registered processor and collect their gists.

    Args:
        query: the user's question.
        text: free-form text input for the text processors.
        image: image input for the vision processors.
        state: session dict; ``state["processor_output"]`` is set here.

    Returns:
        tuple: (one "name: gist" line per processor, updated state).
    """
    results = ctm.ask_processors(
        query=query,
        text=text,
        image=image,
    )
    summary = "".join(
        f"{name}: {info['gist']}\n" for name, info in results.items()
    )
    state["processor_output"] = results
    return summary, state
|
|
|
|
|
def uptree_competition(state):
    """Run the CTM up-tree competition over the processor outputs.

    Reads ``state["processor_output"]`` and stores the winner under
    ``state["winning_output"]``.

    Returns:
        tuple: (human-readable winner description, updated state).
    """
    winner = ctm.uptree_competition(state["processor_output"])
    state["winning_output"] = winner
    description = (
        f"The winning processor is: {winner['name']}\n"
        f"The winning gist is: {winner['gist']}\n"
    )
    return description, state
|
|
|
|
|
def ask_supervisor(state):
    """Ask the supervisor to answer the query from the winning gist.

    Reads ``state["question"]`` and ``state["winning_output"]``; stores
    the supervisor's answer and confidence under ``state["answer"]`` and
    ``state["score"]``.

    Returns:
        tuple: (human-readable answer summary, updated state).
    """
    winning = state["winning_output"]
    question = state["question"]
    answer, confidence = ctm.ask_supervisor(question, winning)
    state["answer"] = answer
    state["score"] = confidence
    output_info = f'The answer to the query "{question}" is: {answer}\nThe confidence for answering is: {confidence}\n'
    return output_info, state
|
|
|
|
|
def interface_tab():
    """Render the main interaction tab.

    Inputs (text, query, image), a button that triggers ``forward``, and
    three output boxes showing the processor, competition, and supervisor
    stages of the CTM pass.

    Fix: the function previously ended with ``return interface_tab`` —
    returning the function object itself, an apparent leftover that no
    caller used (``main`` ignores the value). It now returns None.
    """
    with gr.Blocks():
        # Per-session dict threaded through every forward() call.
        state = gr.State({})

        with gr.Column():
            text = gr.Textbox(label="Enter your text here")
            query = gr.Textbox(label="Enter your query here")
            image = gr.Image(label="Upload your image")

            forward_button = gr.Button("Start CTM forward process")

            processors_output = gr.Textbox(
                label="Processors Output", visible=True
            )
            competition_output = gr.Textbox(
                label="Up-tree Competition Output", visible=True
            )
            supervisor_output = gr.Textbox(
                label="Supervisor Output", visible=True
            )

            forward_button.click(
                fn=forward,
                inputs=[query, text, image, state],
                outputs=[
                    processors_output,
                    competition_output,
                    supervisor_output,
                    state,
                ],
            )
|
|
|
|
|
def main():
    """Assemble the full demo page: banner row, processor-selection row,
    and the interaction row.

    Returns:
        gr.Blocks: the assembled (not yet launched) demo; see start_demo().
    """
    # NOTE(review): several CSS ids here (#chat_container, #chatbot,
    # #create_container, #tokenizer_renderer) have no matching elem_id in
    # this file — presumably leftovers from a template; verify before removing.
    with gr.Blocks(
        css="""#chat_container {height: 820px; width: 1000px; margin-left: auto; margin-right: auto;}
        #chatbot {height: 600px; overflow: auto;}
        #create_container {height: 750px; margin-left: 0px; margin-right: 0px;}
        #tokenizer_renderer span {white-space: pre-wrap}
        """
    ) as demo:
        with gr.Row():
            introduction()
        with gr.Row():
            processor_tab()
        with gr.Row():
            interface_tab()
    return demo
|
|
|
|
|
def start_demo():
    """Build the demo page and launch the Gradio server.

    The module-level DEPLOYED flag selects between hosted-deployment
    launch options and a local development server.
    """
    demo = main()
    if not DEPLOYED:
        # Local development: plain queue, server reachable on the LAN.
        demo.queue()
        demo.launch(share=False, server_name="0.0.0.0")
    else:
        # Hosted deployment: keep the API surface hidden.
        demo.queue(api_open=False).launch(show_api=False)
|
|
|
|
|
if __name__ == "__main__": |
|
start_demo() |
|
|