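"""Gradio demo for a Consciousness Turing Machine (CTM).

Users select text and vision processors, enter a query with optional text and
image input, and launch one CTM step: the processors produce gists, an up-tree
competition picks a winning chunk, and a supervisor answers the query.
"""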
import base64
import io
import os
import sys

import gradio as gr
from PIL import Image

sys.path.append("./ctm")
from ctm.ctms.ctm_base import BaseConsciousnessTuringMachine

ctm = BaseConsciousnessTuringMachine()
ctm.add_supervisor("gpt4_supervisor")

DEPLOYED = os.getenv("DEPLOYED", "true").lower() == "true"

def convert_base64(image_array):
    """Encode a numpy image array as a base64 PNG string."""
    image = Image.fromarray(image_array)
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    byte_data = buffer.getvalue()
    base64_string = base64.b64encode(byte_data).decode("utf-8")
    return base64_string

def introduction():
    with gr.Column(scale=2):
        gr.Image("images/banner.jpg", elem_id="banner-image", show_label=False)

def add_processor(processor_name, display_name, state):
    print("add processor", processor_name)
    ctm.add_processor(processor_name)
    print(ctm.processor_group_map)
    print(len(ctm.processor_list))
    # Re-render the clicked button with the "selected" style.
    return gr.Button(
        value=display_name,
        elem_id="selected",
    )

def processor_tab():
    # Processor model names, grouped by modality.
    text_processors = [
        "gpt4_text_emotion_processor",
        "gpt4_text_summary_processor",
        "gpt4_speaker_intent_processor",
        "roberta_text_sentiment_processor",
    ]
    vision_processors = [
        "gpt4v_cloth_fashion_processor",
        "gpt4v_face_emotion_processor",
        "gpt4v_ocr_processor",
        "gpt4v_posture_processor",
        "gpt4v_scene_location_processor",
    ]

    with gr.Accordion("Select your processors here."):
        with gr.Row():
            with gr.Blocks():
                for model_name in text_processors:
                    display_name = (
                        model_name.replace("processor", "")
                        .replace("_", " ")
                        .title()
                    )
                    button = gr.Button(value=display_name, elem_id="unselected")
                    # Hidden textboxes carry the processor and display names
                    # into the click handler.
                    processor_name = gr.Textbox(value=model_name, visible=False)
                    display_name = gr.Textbox(value=display_name, visible=False)
                    button.click(
                        fn=add_processor,
                        inputs=[processor_name, display_name, gr.State()],
                        outputs=[button],
                    )
                for model_name in vision_processors:
                    display_name = (
                        model_name.replace("processor", "")
                        .replace("_", " ")
                        .title()
                    )
                    button = gr.Button(value=display_name, elem_id="unselected")
                    processor_name = gr.Textbox(value=model_name, visible=False)
                    display_name = gr.Textbox(value=display_name, visible=False)
                    button.click(
                        fn=add_processor,
                        inputs=[processor_name, display_name, gr.State()],
                        outputs=[button],
                    )

def forward(query, content, image, state):
    # One full CTM step: query the processors, run the up-tree competition,
    # ask the supervisor, then broadcast the winner back down and form links.
    state["question"] = query
    ask_processors_output_info, state = ask_processors(
        query, content, image, state
    )
    uptree_competition_output_info, state = uptree_competition(state)
    ask_supervisor_output_info, state = ask_supervisor(state)
    ctm.downtree_broadcast(state["winning_output"])
    ctm.link_form(state["processor_output"])
    return (
        ask_processors_output_info,
        uptree_competition_output_info,
        ask_supervisor_output_info,
        state,
        gr.Button(value="Update CTM", elem_id="selected-ctm"),
    )

def ask_processors(query, text, image, state):
    # Query every registered processor and collect its gist.
    processor_output = ctm.ask_processors(
        query=query,
        text=text,
        image=image,
    )
    output_info = ""
    for name, info in processor_output.items():
        gist = info["gist"].replace("\n", "").strip()
        output_info += f"<{name}>\n{gist}\n\n"
    state["processor_output"] = processor_output
    return output_info, state

def uptree_competition(state):
    winning_output = ctm.uptree_competition(state["processor_output"])
    state["winning_output"] = winning_output
    output_info = "<{}>\n{}".format(
        winning_output["name"],
        winning_output["gist"].replace("\n", "").strip(),
    )
    return output_info, state

def ask_supervisor(state):
    question = state["question"]
    winning_output = state["winning_output"]
    answer, score = ctm.ask_supervisor(question, winning_output)
    output_info = answer
    state["answer"] = answer
    state["score"] = score
    return output_info, state

def input_tab():
    state = gr.State({})  # State to hold and pass values
    with gr.Accordion("Enter your input here."):
        with gr.Row():
            query = gr.Textbox(label="Query", placeholder="Type your query here", lines=3)
        with gr.Row():
            text = gr.Textbox(label="Text Input", placeholder="Input text data", lines=11)
            image = gr.Image(label="Image Input")
    return query, text, image, state

def output_tab(query, text, image, state):
    with gr.Accordion("Check your outputs here."):
        processors_output = gr.Textbox(label="STM Chunks", visible=True, lines=5)
        competition_output = gr.Textbox(label="Winning Chunk", visible=True, lines=3)
        supervisor_output = gr.Textbox(label="Answer", visible=True, lines=2)
    forward_button = gr.Button("Launch CTM", elem_id="unselected-ctm")
    forward_button.click(
        fn=forward,
        inputs=[query, text, image, state],
        outputs=[
            processors_output,
            competition_output,
            supervisor_output,
            state,
            forward_button,
        ],
    )

def main():
    with gr.Blocks(
        css="""#chat_container {height: 820px; width: 1000px; margin-left: auto; margin-right: auto;}
        #chatbot {height: 600px; overflow: auto;}
        #create_container {height: 750px; margin-left: 0px; margin-right: 0px;}
        #tokenizer_renderer span {white-space: pre-wrap}
        #selected {background-color: orange; width: 180px}
        #unselected {width: 180px;}
        #selected-ctm {background-color: orange;}
        #unselected-ctm {}
        """,
        theme="gradio/monochrome",
    ) as demo:
        with gr.Row():
            introduction()
        with gr.Row():
            with gr.Column():
                processor_tab()
                query, text, image, state = input_tab()
            with gr.Column():
                output_tab(query, text, image, state)
    return demo

def start_demo():
    demo = main()
    if DEPLOYED:
        demo.queue(api_open=False).launch(show_api=False)
    else:
        demo.queue()
        demo.launch(share=False, server_name="0.0.0.0")


if __name__ == "__main__":
    start_demo()
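# A possible local invocation (the file name "app.py" is an assumption here):
#   DEPLOYED=false python app.py
# This takes the else-branch above and serves the demo on 0.0.0.0 without a
# public share link; leaving DEPLOYED unset (or "true") uses the hosted setup.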