Willy030125 committed on
Commit 767353b
1 Parent(s): 5b5b56e

Update README.md

Files changed (1)
  1. README.md +57 -0
README.md CHANGED
@@ -10,6 +10,7 @@ tags:
 based on Merak-7B-v4 Mistral<br>
 <br>
 
+Some training params used:
 ```python
 lora_r = 8
 lora_alpha = 16
@@ -18,4 +19,60 @@ lora_dropout = 0.05
 learning_rate = 2e-4
 lr_scheduler = "cosine"
 max_seq_length = 2048
+```
+
+Inference:
+```python
+import torch
+from transformers import AutoModelForCausalLM, BitsAndBytesConfig, LlamaTokenizer
+from peft import PeftModel
+
+model_name = "Ichsan2895/Merak-7B-v4"
+adapter_name = "Willy030125/finetune-indoMMLU-Merak-7B-v1"
+
+# Quantize the base model to 4-bit NF4 so it fits in limited GPU memory
+bnb_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_use_double_quant=True,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_compute_dtype=torch.bfloat16
+)
+
+model = AutoModelForCausalLM.from_pretrained(
+    model_name,
+    quantization_config=bnb_config,
+    device_map="auto",
+    trust_remote_code=True
+)
+
+# Load the fine-tuned LoRA adapter on top of the quantized base model
+model = PeftModel.from_pretrained(model, adapter_name)
+tokenizer = LlamaTokenizer.from_pretrained(model_name)
+
+def generate_response(question: str) -> str:
+    # System prompt (Indonesian): "You are Merak, an artificial intelligence
+    # model trained by Muhammad Ichsan. Please answer the following question
+    # correctly, factually, and politely."
+    chat = [
+        {"role": "system", "content": "Anda adalah Merak, sebuah model kecerdasan buatan yang dilatih oleh Muhammad Ichsan. Mohon jawab pertanyaan berikut dengan benar, faktual, dan ramah."},
+        {"role": "user", "content": question},
+    ]
+
+    prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+    inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=True)
+
+    with torch.no_grad():
+        outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"),
+                                 attention_mask=inputs.attention_mask.to("cuda"),
+                                 eos_token_id=tokenizer.eos_token_id,
+                                 pad_token_id=tokenizer.eos_token_id,
+                                 max_new_tokens=256)
+    response = tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0]
+
+    # Keep only the text after the assistant marker in the decoded output
+    assistant_start = f'''{question} \n assistant\n '''
+    response_start = response.find(assistant_start)
+    return response[response_start + len(assistant_start):].strip()
+
+# Example question (Indonesian): "Animals that eat plants are called ...
+# A. Omnivores  B. Carnivores  C. Decomposers  D. Herbivores"
+prompt = """Hewan pemakan tumbuhan dinamakan ...
+A. Omnivora
+B. Karnivora
+C. Pengurai
+D. Herbivora"""
+
+print(generate_response(prompt))
 ```
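
For readers wanting to reproduce the run, the hyperparameters in the first hunk above slot directly into PEFT's `LoraConfig` and a TRL `SFTTrainer` setup. The following is a minimal sketch, assuming a 2023-era TRL API; the dataset handle, `target_modules`, batch size, and epoch count are illustrative assumptions, not taken from this commit — only `r`, `lora_alpha`, `lora_dropout`, `learning_rate`, the cosine scheduler, and `max_seq_length` come from the README:

```python
from datasets import load_dataset
from peft import LoraConfig
from transformers import TrainingArguments
from trl import SFTTrainer

# Illustrative dataset handle -- the commit does not name the exact training set
dataset = load_dataset("indolem/indommlu", split="train")  # assumption

lora_config = LoraConfig(
    r=8,                    # lora_r = 8, from the README
    lora_alpha=16,          # lora_alpha = 16
    lora_dropout=0.05,      # lora_dropout = 0.05
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # assumption: typical attention projections
)

training_args = TrainingArguments(
    output_dir="finetune-indoMMLU-Merak-7B",  # illustrative
    learning_rate=2e-4,             # from the README
    lr_scheduler_type="cosine",     # from the README
    per_device_train_batch_size=4,  # assumption: not stated in the commit
    num_train_epochs=1,             # assumption
    bf16=True,
)

trainer = SFTTrainer(
    model="Ichsan2895/Merak-7B-v4",  # base model the adapter was trained on
    train_dataset=dataset,
    peft_config=lora_config,
    dataset_text_field="text",  # assumption: rows pre-formatted as plain text
    max_seq_length=2048,        # from the README
    args=training_args,
)
trainer.train()
```

With r=8 and lora_alpha=16 the effective LoRA scaling factor is alpha/r = 2, a common choice that keeps adapter updates small relative to the frozen base weights.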
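The inference snippet keeps the adapter separate from the 4-bit base model, which requires `peft` at runtime. If a standalone checkpoint is preferred, the adapter can be folded into the base weights with PEFT's `merge_and_unload()`. A sketch, with an illustrative output path; the base is reloaded in fp16 because LoRA weights cannot be merged into 4-bit tensors:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, LlamaTokenizer

base_name = "Ichsan2895/Merak-7B-v4"
adapter_name = "Willy030125/finetune-indoMMLU-Merak-7B-v1"

# Reload the base model in half precision for merging
base = AutoModelForCausalLM.from_pretrained(
    base_name,
    torch_dtype=torch.float16,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, adapter_name)
model = model.merge_and_unload()  # fold the adapter deltas into the base weights

out_dir = "merak-7b-v4-indommlu-merged"  # illustrative output directory
model.save_pretrained(out_dir)
LlamaTokenizer.from_pretrained(base_name).save_pretrained(out_dir)
```

The merged directory can then be loaded with a plain `AutoModelForCausalLM.from_pretrained(out_dir)`, with no PEFT dependency at serving time.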