added chatGPT
app.py CHANGED
@@ -1,6 +1,8 @@
 import os
+from pyChatGPT import ChatGPT
+
+os.system("pip install -U gradio")
 
-os.system("pip install gradio==2.4.6")
 import sys
 import gradio as gr
 
@@ -81,7 +83,19 @@ classifier = BUILDIN_CLASSIFIER[vocabulary]
 num_classes = len(metadata.thing_classes)
 reset_cls_test(predictor.model, classifier, num_classes)
 
-os.
+session_token = os.environ.get("SessionToken")
+
+
+def get_response_from_chatbot(text):
+    try:
+        api = ChatGPT(session_token)
+        resp = api.send_message(text)
+        api.refresh_auth()
+        api.reset_conversation()
+        response = resp["message"]
+    except:
+        response = "Sorry, I'm busy. Try again later."
+    return response
 
 
 def inference(img):
@@ -93,6 +107,8 @@ def inference(img):
     out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
 
     detected_objects = []
+    object_list_str = []
+
     box_locations = outputs["instances"].pred_boxes
     box_loc_screen = box_locations.tensor.cpu().numpy()
 
@@ -110,27 +126,31 @@ def inference(img):
                 "h": int(height),
             }
         )
+        object_list_str.append(
+            f"{predicted_label} - X:({int(x0)} Y: {int(y0)} Width {int(width)} Height: {int(height)})"
+        )
 
-
+    chat_gpt_response = get_response_from_chatbot(
+        f"You are an intelligent image captioner. I will hand you the objects and their position, and you should give me a detailed description for the photo. In this photo we have the following objects\n{object_list_str}"
+    )
 
+    return (
+        Image.fromarray(np.uint8(out.get_image())).convert("RGB"),
+        detected_objects,
+        chat_gpt_response,
+    )
 
-title = "Detic"
 
-
+with gr.Blocks() as demo:
+    gr.Markdown("# Detic+ChatGPT")
+    with gr.Column():
+        inp = gr.Image(label="Input Image", type="filepath")
+        btn_detic = gr.Button("Run Detic+ChatGPT")
+    with gr.Column():
+        outviz = gr.Image(label="Visualization", type="pil")
+        output_desc = gr.Textbox(label="chatGPT Description", lines=5)
+        outputjson = gr.JSON(label="Detected Objects")
 
-
+btn_detic.click(fn=inference, inputs=inp, outputs=[outviz, outputjson, output_desc])
 
-
-gr.Interface(
-    inference,
-    inputs=gr.inputs.Image(type="filepath"),
-    outputs=[
-        gr.outputs.Image(label="Visualization", type="pil"),
-        gr.outputs.JSON(label="Detected Objects"),
-    ],
-    enable_queue=True,
-    title=title,
-    description=description,
-    article=article,
-    examples=examples,
-).launch()
+demo.launch()
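
Note: get_response_from_chatbot talks to ChatGPT through the unofficial pyChatGPT session-cookie client, so the Space only produces real captions when a valid session token is stored in its SessionToken secret. A minimal local smoke test of the helper, assuming pyChatGPT behaves exactly as it is called above (ChatGPT(session_token), send_message returning a dict with a "message" key, refresh_auth, reset_conversation), could look like this sketch:

import os

from pyChatGPT import ChatGPT

# Same secret the Space reads; export SessionToken in your shell before running.
session_token = os.environ.get("SessionToken")


def get_response_from_chatbot(text):
    # Mirrors the helper added in app.py: one message per call, then the
    # conversation is reset so each caption request starts fresh.
    try:
        api = ChatGPT(session_token)
        resp = api.send_message(text)
        api.refresh_auth()
        api.reset_conversation()
        response = resp["message"]
    except:
        response = "Sorry, I'm busy. Try again later."
    return response


if __name__ == "__main__":
    print(get_response_from_chatbot("Describe a photo that contains a dog and a bicycle."))

If the token is missing or expired, the bare except falls back to the canned "Sorry, I'm busy. Try again later." reply, which is also what the deployed demo shows in that case.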
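
Also worth noting: inference never sends the image to ChatGPT; it sends a text prompt assembled from the detected labels and box coordinates. The sketch below uses a made-up label and coordinates (only the f-string templates are taken from app.py) to print the prompt that would be handed to get_response_from_chatbot:

# Hypothetical detection; the label and numbers are invented for illustration.
predicted_label, x0, y0, width, height = "dog", 12, 34, 200, 150

object_list_str = []
object_list_str.append(
    f"{predicted_label} - X:({int(x0)} Y: {int(y0)} Width {int(width)} Height: {int(height)})"
)

# Same prompt template as in inference(); note that object_list_str is a list,
# so its repr (brackets and quotes included) is what gets interpolated.
prompt = f"You are an intelligent image captioner. I will hand you the objects and their position, and you should give me a detailed description for the photo. In this photo we have the following objects\n{object_list_str}"
print(prompt)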