Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -323,42 +323,24 @@ def wrap_text(text, width=90):
|
|
323 |
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-1_8B-Chat", trust_remote_code=True) # TruEra
|
324 |
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-1_8B-Chat", device_map="auto", trust_remote_code=True).eval()
|
325 |
|
326 |
-
|
327 |
-
|
328 |
-
|
329 |
-
# Encode the input text
|
330 |
-
encoded_input = tokenizer(formatted_input, return_tensors="pt").to(device)
|
331 |
-
|
332 |
-
# Generate a response using the model
|
333 |
-
output = model.generate(
|
334 |
-
**encoded_input,
|
335 |
-
max_length=512,
|
336 |
-
use_cache=True,
|
337 |
-
early_stopping=True,
|
338 |
-
pad_token_id=tokenizer.eos_token_id,
|
339 |
-
temperature=0.1,
|
340 |
-
do_sample=True
|
341 |
-
)
|
342 |
-
|
343 |
-
response_text = tokenizer.decode(output[0], skip_special_tokens=True)
|
344 |
-
|
345 |
-
return response_text
|
346 |
|
|
|
347 |
class ChatBot:
    """Stateless wrapper around the module-level Qwen model (TruEra)."""

    def __init__(self):
        # Conversation history; empty until the first exchange.
        # (Original had a dangling `self.history =`, a syntax error that
        # put the Space into a Runtime error state.)
        self.history = []

    @staticmethod
    def doctor(user_input, system_prompt="You are an expert medical analyst:"):  # TruEra
        """Generate a one-shot medical-analyst reply for *user_input*.

        The system prompt is prepended verbatim to the user text, the
        result is encoded and fed to the module-level ``model``/``tokenizer``.
        Returns the decoded response text (special tokens stripped).
        """
        formatted_input = f"{system_prompt}{user_input}"
        user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt")
        response = model.generate(input_ids=user_input_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)
        response_text = tokenizer.decode(response[0], skip_special_tokens=True)
        return response_text
|
358 |
|
|
|
|
|
|
|
359 |
|
360 |
# Shared ChatBot instance used by the prompt helpers in this module.
bot = ChatBot()
|
361 |
|
|
|
|
|
|
|
|
|
362 |
|
363 |
def process_summary_with_qwen(summary): # TruEra
|
364 |
system_prompt = "You are a medical instructor . Assess and describe the proper options to your students in minute detail. Propose a course of action for them to base their recommendations on based on your description."
|
|
|
323 |
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-1_8B-Chat", trust_remote_code=True) # TruEra
|
324 |
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-1_8B-Chat", device_map="auto", trust_remote_code=True).eval()
|
325 |
|
326 |
+
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-1_8B-Chat", trust_remote_code=True)
|
327 |
+
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-1_8B-Chat", device_map="auto", trust_remote_code=True).eval()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
328 |
|
329 |
+
# TruEra
class ChatBot:
    """Multi-turn chat wrapper; keeps Qwen's conversation history across calls."""

    def __init__(self):
        # No conversation yet — model.chat treats None as a fresh dialogue.
        self.history = None

    def predict(self, user_input, system_prompt=""):
        """Send *user_input* to the chat model and return its reply.

        self.history is updated in place so the next call continues the
        same conversation.
        """
        response, self.history = model.chat(tokenizer, user_input, history=self.history, system=system_prompt)
        return response
|
337 |
|
338 |
# Single shared ChatBot; multimodal_prompt routes all requests through it.
bot = ChatBot()
|
339 |
|
340 |
+
# TruEra
def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:"):
    """Forward *user_input* to the shared ChatBot under the given system prompt."""
    reply = bot.predict(user_input, system_prompt)
    return reply
|
343 |
+
|
344 |
|
345 |
def process_summary_with_qwen(summary): # TruEra
|
346 |
system_prompt = "You are a medical instructor . Assess and describe the proper options to your students in minute detail. Propose a course of action for them to base their recommendations on based on your description."
|