import os

import gradio as gr

from ctm.ctms.ctm_base import BaseConsciousnessTuringMachine

# A single CTM instance is shared by the whole demo: two GPT-4-backed text
# processors in one group, plus a supervisor that produces the final answer.
ctm = BaseConsciousnessTuringMachine()
ctm.add_processor("gpt4_text_emotion_processor", group_name="group_1")
ctm.add_processor("gpt4_text_summary_processor", group_name="group_1")
ctm.add_supervisor("gpt4_supervisor")

# DEPLOYED (default "true") selects the hosted launch configuration;
# set it to "false" for a local debug server.
DEPLOYED = os.getenv("DEPLOYED", "true").lower() == "true"


def introduction():
    with gr.Column(scale=2):
        gr.Image(
            "images/sotopia.jpg", elem_id="banner-image", show_label=False
        )
    with gr.Column(scale=5):
        gr.Markdown(
            """Consciousness Turing Machine Demo
            """
        )


def add_processor(processor_name):
    # Register an extra processor at runtime; no group_name is given here,
    # unlike the module-level setup above.
    print("add processor ", processor_name)
    ctm.add_processor(processor_name)
    print(len(ctm.processor_list))


def processor_tab():
    with gr.Row():
        button1 = gr.Button("Text Emotion Analyzer")
        button2 = gr.Button("Text Summary Generator")

        # Hidden textboxes carry the processor names so both buttons can reuse
        # a single click handler.
        invisible_input1 = gr.Textbox(
            value="gpt4_text_emotion_processor", visible=False
        )
        invisible_input2 = gr.Textbox(
            value="gpt4_text_summary_processor", visible=False
        )

        button1.click(
            fn=add_processor,
            inputs=[invisible_input1],
        )
        button2.click(
            fn=add_processor,
            inputs=[invisible_input2],
        )


def forward(query, content, image, state):
    # One full CTM cycle: ask all processors, run the up-tree competition,
    # ask the supervisor, then broadcast the winner back down and update links.
    state["question"] = query
    ask_processors_output_info, state = ask_processors(query, content, image, state)
    uptree_competition_output_info, state = uptree_competition(state)
    ask_supervisor_output_info, state = ask_supervisor(state)

    ctm.downtree_broadcast(state["winning_output"])
    ctm.link_form(state["processor_output"])
    return (
        ask_processors_output_info,
        uptree_competition_output_info,
        ask_supervisor_output_info,
        state,
    )


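# A minimal usage sketch of the forward cycle outside the UI (assumes the
# GPT-4-backed processors are configured, e.g. via an OpenAI API key in the
# environment; the query and content strings are only illustrative):
#
#     info_p, info_c, info_s, state = forward(
#         query="What emotion does the text express?",
#         content="I finally passed the exam!",
#         image=None,
#         state={},
#     )
#     print(info_s)  # the supervisor's answer and confidence

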
def ask_processors(query, content, image, state):
    # Only the text query and context are forwarded for now; the image, audio,
    # and video collected in the UI are not yet passed to the processors.
    processor_output = ctm.ask_processors(
        question=query,
        context=content,
        image_path=None,
        audio_path=None,
        video_path=None,
    )
    output_info = ""
    for name, info in processor_output.items():
        output_info += f"{name}: {info['gist']}\n"
    state["processor_output"] = processor_output
    return output_info, state


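# If the processors accept local file paths for multimodal inputs (an
# assumption; the demo currently passes None), the image could be wired
# through roughly like this, with gr.Image(type="filepath") in the UI:
#
#     processor_output = ctm.ask_processors(
#         question=query,
#         context=content,
#         image_path=image,   # hypothetical: assumes image_path takes a file path
#         audio_path=None,
#         video_path=None,
#     )

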
def uptree_competition(state):
    winning_output = ctm.uptree_competition(state["processor_output"])
    state["winning_output"] = winning_output
    output_info = (
        f"The winning processor is: {winning_output['name']}\n"
        f"The winning gist is: {winning_output['gist']}\n"
    )
    return output_info, state


def ask_supervisor(state):
    question = state["question"]
    winning_output = state["winning_output"]
    answer, score = ctm.ask_supervisor(question, winning_output)
    output_info = (
        f'The answer to the query "{question}" is: {answer}\n'
        f"The confidence for answering is: {score}\n"
    )
    state["answer"] = answer
    state["score"] = score
    return output_info, state


def interface_tab():
    with gr.Blocks() as interface_tab:
        # Per-session state threaded through each stage of the forward pass.
        state = gr.State({})

        with gr.Column():
            content = gr.Textbox(label="Enter your text here")
            query = gr.Textbox(label="Enter your query here")
            image = gr.Image(label="Upload your image")
            audio = gr.Audio(label="Upload or Record Audio")
            video = gr.Video(label="Upload or Record Video")

            forward_button = gr.Button("Start CTM forward process")

            processors_output = gr.Textbox(
                label="Processors Output", visible=True
            )
            competition_output = gr.Textbox(
                label="Up-tree Competition Output", visible=True
            )
            supervisor_output = gr.Textbox(
                label="Supervisor Output", visible=True
            )

            forward_button.click(
                fn=forward,
                inputs=[query, content, image, state],
                outputs=[
                    processors_output,
                    competition_output,
                    supervisor_output,
                    state,
                ],
            )

    return interface_tab


def main():
    with gr.Blocks(
        css="""#chat_container {height: 820px; width: 1000px; margin-left: auto; margin-right: auto;}
        #chatbot {height: 600px; overflow: auto;}
        #create_container {height: 750px; margin-left: 0px; margin-right: 0px;}
        #tokenizer_renderer span {white-space: pre-wrap}
        """
    ) as demo:
        with gr.Row():
            introduction()
        with gr.Row():
            processor_tab()
        with gr.Row():
            interface_tab()

    return demo


def start_demo():
    demo = main()
    if DEPLOYED:
        demo.queue(api_open=False).launch(show_api=False)
    else:
        demo.queue()
        demo.launch(share=False, server_name="0.0.0.0")


if __name__ == "__main__":
    start_demo()
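

# Usage sketch (assumes this script is saved as app.py; adjust the filename):
#   python app.py                   # default: deployed configuration, API docs hidden
#   DEPLOYED=false python app.py    # local debug server bound to 0.0.0.0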