Update app.py
app.py CHANGED

```diff
@@ -30,7 +30,7 @@ model.generation_config.eos_token_id = processor.tokenizer.eos_token_id
 
 @spaces.GPU(duration=120)
 def krypton(input, history):
-
+    print(f"Input: {input}")  # Debug input
     print(f"History: {history}")  # Debug history
 
     if input["files"]:
@@ -60,7 +60,7 @@ def krypton(input, history):
     print("Made the prompt")
 
     try:
-        inputs = processor(prompt, images=image, return_tensors='pt').to('cuda', torch.float16)
+        inputs = processor(text=prompt, images=image, return_tensors='pt').to('cuda', torch.float16)
         print(f"Processed inputs: {inputs}")
     except Exception as e:
         print(f"Error processing inputs: {e}")
@@ -85,11 +85,11 @@ def krypton(input, history):
     thread.start()
 
     buffer = ""
-    time.sleep(0.5)
+    # time.sleep(0.5)
     for new_text in streamer:
         buffer += new_text
         generated_text_without_prompt = buffer
-        time.sleep(0.06)
+        # time.sleep(0.06)
         yield generated_text_without_prompt
 
 
```
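Taken together, the three hunks add an input debug print, pass the prompt to the processor as an explicit `text=` keyword, and comment out the two pacing `time.sleep()` calls in the streaming loop. Below is a minimal, self-contained sketch of the same streaming pattern, assuming a LLaVA-style checkpoint; the model id, the `stream_reply` name, and the `max_new_tokens` value are illustrative, not taken from this Space.

```python
# Minimal sketch of the streaming setup krypton() uses, assuming a
# LLaVA-style checkpoint. The model id, generation settings, and the
# stream_reply name are illustrative, not this Space's actual config.
from threading import Thread

import torch
from PIL import Image
from transformers import (
    AutoProcessor,
    LlavaForConditionalGeneration,
    TextIteratorStreamer,
)

model_id = "llava-hf/llava-1.5-7b-hf"  # assumption: any LLaVA-style model
processor = AutoProcessor.from_pretrained(model_id)
model = LlavaForConditionalGeneration.from_pretrained(
    model_id, torch_dtype=torch.float16
).to("cuda")

def stream_reply(prompt: str, image: Image.Image):
    # Passing text= and images= as keywords (the change in the second hunk)
    # guards against positional-argument reordering across transformers releases.
    inputs = processor(text=prompt, images=image, return_tensors="pt").to(
        "cuda", torch.float16
    )

    # skip_prompt=True makes the streamer yield only newly generated text,
    # so the buffer below never contains the echoed prompt.
    streamer = TextIteratorStreamer(
        processor.tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    thread = Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=256),
    )
    thread.start()

    buffer = ""
    # No time.sleep() pacing is needed (third hunk): iterating the streamer
    # blocks until the generation thread pushes the next decoded chunk.
    for new_text in streamer:
        buffer += new_text
        yield buffer
```

The sleeps are safe to drop because iterating a `TextIteratorStreamer` already blocks on an internal queue until the generation thread produces more text, so the loop paces itself to the model's actual output rate.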