vishal042002
committed on
Update README.md
README.md
CHANGED
@@ -27,6 +27,34 @@ The model was trained on a custom dataset containing clinical surgery Q&A pairs.
Open-source medical books
Medical catalogs

Running the model through Adapter Merge:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import torch

# Load the base model in half precision; device_map="auto" places it on the available GPU(s).
base_model_name = "unsloth/Llama-3.2-3B-Instruct"
base_model = AutoModelForCausalLM.from_pretrained(base_model_name, torch_dtype=torch.float16, device_map="auto")

# Attach the fine-tuned clinical-surgery LoRA adapter on top of the base weights.
adapter_path = "vishal042002/Llama3.2-3b-Instruct-ClinicalSurgery"
base_model = PeftModel.from_pretrained(base_model, adapter_path)

tokenizer = AutoTokenizer.from_pretrained(base_model_name)

device = "cuda" if torch.cuda.is_available() else "cpu"
base_model.to(device)

# Sample usage
input_text = "What is the mortality rate for patients requiring surgical intervention who were unstable preoperatively?"
inputs = tokenizer(input_text, return_tensors="pt").to(device)

# do_sample=True makes sampling explicit so temperature/top_p are actually applied.
outputs = base_model.generate(**inputs, max_new_tokens=200, temperature=1.5, top_p=0.9, do_sample=True)
decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True)

print(decoded_output)
```
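
The snippet above keeps the LoRA adapter as a separate PEFT wrapper around the base model. If you want a single standalone checkpoint (taking "adapter merge" literally), you can fold the adapter weights into the base model with PEFT's `merge_and_unload()`. A minimal sketch, reusing the variables from the snippet above; the output directory name is only an example, not part of this repository:

```python
# Fold the LoRA weights into the base model so no PEFT wrapper is needed at inference time.
merged_model = base_model.merge_and_unload()

# Optionally save the merged weights as a plain Hugging Face checkpoint
# ("./clinical-surgery-merged" is an example path).
merged_model.save_pretrained("./clinical-surgery-merged")
tokenizer.save_pretrained("./clinical-surgery-merged")

# The merged model generates the same way as the wrapped one.
outputs = merged_model.generate(**inputs, max_new_tokens=200, temperature=1.5, top_p=0.9, do_sample=True)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

Merging trades flexibility (you can no longer swap adapters) for slightly simpler deployment and no PEFT dependency at inference time.
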
This model is designed to:

Answer questions about clinical surgery procedures.
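
The usage example above feeds a raw question string to the model. Because the base model is an instruction-tuned chat model, you can also try formatting the question with the tokenizer's chat template; whether this matches the format used during fine-tuning is an assumption, so compare outputs from both approaches. The question below is purely illustrative, and the snippet reuses `base_model`, `tokenizer`, and `device` from above:

```python
# Format the question as a single user turn using the tokenizer's built-in chat template.
messages = [
    {"role": "user", "content": "What are the key steps of a laparoscopic appendectomy?"}
]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(device)

outputs = base_model.generate(input_ids=input_ids, max_new_tokens=200, temperature=1.5, top_p=0.9, do_sample=True)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```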