Spaces: Running on Zero
Update app.py
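This commit loads a second checkpoint, ucsahin/TraVisionLM-DPO, alongside the base model and replaces the streamed single-model reply with one combined response showing the base and DPO answers to the same prompt.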
app.py CHANGED
```diff
@@ -9,14 +9,16 @@ import spaces
 
 processor = AutoProcessor.from_pretrained("ucsahin/TraVisionLM-base", trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained("ucsahin/TraVisionLM-base", trust_remote_code=True)
+model_dpo = AutoModelForCausalLM.from_pretrained("ucsahin/TraVisionLM-DPO", trust_remote_code=True)
 
 model.to("cuda:0")
+model_dpo.to("cuda:0")
 
 @spaces.GPU
 def bot_streaming(message, history, max_tokens, temperature, top_p, top_k, repetition_penalty):
     print(max_tokens, temperature, top_p, top_k, repetition_penalty)
-    if message
-        image = message
+    if message['files']:
+        image = message['files'][-1]['path']
     else:
         # if there's no image uploaded for this turn, look for images in the past turns
         for hist in history:
@@ -26,11 +28,10 @@ def bot_streaming(message, history, max_tokens, temperature, top_p, top_k, repetition_penalty):
     if image is None:
         gr.Error("Lütfen önce bir resim yükleyin.")
 
-    prompt = f"{message
+    prompt = f"{message['text']}"
     image = Image.open(image).convert("RGB")
     inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda:0")
 
-    streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": True})
     generation_kwargs = dict(
         inputs, streamer=streamer, max_new_tokens=max_tokens,
         do_sample=True, temperature=temperature, top_p=top_p,
@@ -38,18 +39,15 @@ def bot_streaming(message, history, max_tokens, temperature, top_p, top_k, repetition_penalty):
     )
     generated_text = ""
 
-
-
+    model_outputs = model.generate(**generation_kwargs)
+    dpo_outputs = model_dpo.generate(**generation_kwargs)
 
-
-
-    buffer = ""
-    for new_text in streamer:
-        buffer += new_text
-        generated_text_without_prompt = buffer[len(text_prompt):]
+    model_output_text = processor.decode(model_outputs, skip_special_tokens=True)[len(prompt)+1]
+    dpo_output_text = processor.decode(dpo_outputs, skip_special_tokens=True)[len(prompt)+1]
 
-
-
+    generated_text = f"<h3>Base model cevabı:</h3>\n{model_output_text}\n<h3>DPO model cevabı:</h3>\n{dpo_output_text}"
+
+    return generated_text
 
 
 gr.set_static_paths(paths=["static/images/"])
```
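For readers who want to run the updated handler end to end, here is a minimal sketch of app.py as of this commit. It is a reconstruction under stated assumptions, not the committed file: the imports and the history-scanning fallback (old lines 23-27, hidden between the hunks) are filled in with the usual Gradio multimodal-chat pattern; gr.Error is raised rather than merely constructed; the stray streamer=streamer argument is dropped, since the commit removes the TextIteratorStreamer line but keeps the reference, which would raise a NameError; generate()'s batched output is indexed with [0] before decoding; and the echoed prompt is stripped with the slice [len(prompt)+1:] where the committed line indexes a single character with [len(prompt)+1].

```python
# Hedged sketch of the post-commit app.py; the history fallback and the
# hyperparameters hidden between hunks are assumptions, not the exact file.
import gradio as gr
import spaces
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

processor = AutoProcessor.from_pretrained("ucsahin/TraVisionLM-base", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("ucsahin/TraVisionLM-base", trust_remote_code=True)
model_dpo = AutoModelForCausalLM.from_pretrained("ucsahin/TraVisionLM-DPO", trust_remote_code=True)

model.to("cuda:0")
model_dpo.to("cuda:0")

@spaces.GPU
def bot_streaming(message, history, max_tokens, temperature, top_p, top_k, repetition_penalty):
    print(max_tokens, temperature, top_p, top_k, repetition_penalty)
    # Use the image uploaded this turn; otherwise look for one in past turns
    # (assumed reconstruction of the lines the diff does not show).
    image = None
    if message["files"]:
        image = message["files"][-1]["path"]
    else:
        for hist in history:
            if isinstance(hist[0], tuple):  # assumption: file turns arrive as (path,)
                image = hist[0][0]
    if image is None:
        # "Please upload an image first." -- raised so Gradio actually shows it
        raise gr.Error("Lütfen önce bir resim yükleyin.")

    prompt = f"{message['text']}"
    image = Image.open(image).convert("RGB")
    inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda:0")

    generation_kwargs = dict(
        **inputs, max_new_tokens=max_tokens, do_sample=True,
        temperature=temperature, top_p=top_p, top_k=top_k,
        repetition_penalty=repetition_penalty,
    )

    # Run both checkpoints on the same inputs so one reply compares them.
    model_outputs = model.generate(**generation_kwargs)
    dpo_outputs = model_dpo.generate(**generation_kwargs)

    # generate() returns a batch: decode sequence 0, then drop the echoed prompt.
    model_output_text = processor.decode(model_outputs[0], skip_special_tokens=True)[len(prompt) + 1:]
    dpo_output_text = processor.decode(dpo_outputs[0], skip_special_tokens=True)[len(prompt) + 1:]

    # Turkish headings from the commit: "Base model answer" / "DPO model answer".
    return (
        f"<h3>Base model cevabı:</h3>\n{model_output_text}\n"
        f"<h3>DPO model cevabı:</h3>\n{dpo_output_text}"
    )

gr.set_static_paths(paths=["static/images/"])
```

Note that the committed hunks keep the name bot_streaming and the streamer=streamer kwarg even though streaming is gone; if the sketch above is accurate, deleting the leftover kwarg (and eventually renaming the handler) would be the natural follow-up cleanup.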