KingNish committed
Commit 489c872
1 Parent(s): aaace13

Update chatbot.py

Files changed (1):
  chatbot.py +3 -12
chatbot.py CHANGED
@@ -200,11 +200,7 @@ generate_kwargs = dict( max_new_tokens=4000, do_sample=True, stream=True, detail
 system_llava = "<|im_start|>system\nYou are OpenGPT 4o, an exceptionally capable and versatile AI assistant meticulously crafted by KingNish. Your task is to fulfill users query in best possible way. You are provided with image, videos and 3d structures as input with question your task is to give best possible result and explaination to user.<|im_end|>"
 
 @spaces.GPU(duration=60, queue=False)
-def model_inference(
-    user_prompt,
-    chat_history,
-    web_search,
-):
+def model_inference(user_prompt, chat_history, web_search):
     # Define generation_args at the beginning of the function
     generation_args = {}
 
@@ -251,12 +247,7 @@ def model_inference(
             output += response.token.text
             yield output
     else:
-        if user_prompt["files"]:
-            image = user_prompt["files"][-1]
-        else:
-            for hist in history:
-                if type(hist[0])==tuple:
-                    image = hist[0][0]
+        image = user_prompt["files"][-1]
 
         txt = user_prompt["text"]
         img = user_prompt["files"]
@@ -279,7 +270,7 @@ def model_inference(
 
         inputs = processor(prompt, image, return_tensors="pt").to("cuda", torch.float16)
         streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": True})
-        generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=2048, do_sample=True)
+        generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024, do_sample=True)
         generated_text = ""
 
         thread = Thread(target=model.generate, kwargs=generation_kwargs)
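Note on the second hunk: it is a behavioral change, not just cleanup. The deleted branch fell back to scanning earlier chat turns for an uploaded image when the current message carried no files, and it appears to iterate a name `history` even though the parameter is `chat_history`, so the fallback may never have worked as written. After this commit, `user_prompt["files"][-1]` is indexed unconditionally, which raises IndexError for text-only messages. A minimal sketch of the deleted fallback, with the parameter name corrected purely for illustration (this helper is hypothetical, not part of the Space's code):

# Illustrative only: the pre-commit lookup, assuming Gradio-style chat
# history where an uploaded file appears as a (filepath, caption) tuple.
def pick_image(user_prompt, chat_history):
    if user_prompt["files"]:
        return user_prompt["files"][-1]  # post-commit keeps only this line
    for hist in chat_history:
        if isinstance(hist[0], tuple):
            return hist[0][0]
    return None  # no image anywhere; the caller must handle this case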
 
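For context on the third hunk: the surrounding code uses the standard Transformers streaming pattern, where `model.generate` runs in a background thread while the caller iterates a `TextIteratorStreamer` and yields partial text to the UI. A self-contained sketch of that pattern, using a small text-only model as a stand-in (the Space itself passes a LLaVA-style processor and image inputs instead):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Small stand-in model for illustration; the Space loads its own
# processor and vision-language model.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Describe the image:", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=True)

# max_new_tokens=1024 mirrors the value this commit lowers from 2048.
generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024, do_sample=True)

# generate() blocks until completion, so it runs in a worker thread
# while this thread consumes decoded text chunks as they are produced.
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

generated_text = ""
for new_text in streamer:
    generated_text += new_text
    # a Gradio handler would `yield generated_text` here

Lowering max_new_tokens caps how long a single streamed reply can run, which matters under the 60-second @spaces.GPU budget declared on the function.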