riccorl committed on
Commit
dd7fc58
1 Parent(s): 6e5acd0

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +8 -2
README.md CHANGED
@@ -13,7 +13,11 @@ import torch
13
  import transformers as tr
14
 
15
  tokenizer = tr.AutoTokenizer.from_pretrained("riccorl/Modello-Italia-9B")
16
- model = tr.AutoModelForCausalLM.from_pretrained("riccorl/Modello-Italia-9B", device_map="auto", torch_dtype=torch.bfloat16)
 
 
 
 
17
 
18
  MY_SYSTEM_PROMPT_SHORT = (
19
  "Tu sei Modello Italia, un modello di linguaggio naturale addestrato da iGenius."
@@ -23,7 +27,9 @@ messages = [
23
  {"role": "system", "content": MY_SYSTEM_PROMPT_SHORT},
24
  {"role": "user", "content": prompt},
25
  ]
26
- tokenized_chat = hf_tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
 
 
27
  out = model.generate(tokenized_chat.to("cuda"), max_new_tokens=200)
28
  ```
29
 
 
13
  import transformers as tr
14
 
15
  tokenizer = tr.AutoTokenizer.from_pretrained("riccorl/Modello-Italia-9B")
16
+ model = tr.AutoModelForCausalLM.from_pretrained(
17
+ "riccorl/Modello-Italia-9B",
18
+ device_map="auto",
19
+ torch_dtype=torch.bfloat16
20
+ )
21
 
22
  MY_SYSTEM_PROMPT_SHORT = (
23
  "Tu sei Modello Italia, un modello di linguaggio naturale addestrato da iGenius."
 
27
  {"role": "system", "content": MY_SYSTEM_PROMPT_SHORT},
28
  {"role": "user", "content": prompt},
29
  ]
30
+ tokenized_chat = tokenizer.apply_chat_template(
31
+ messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
32
+ )
33
  out = model.generate(tokenized_chat.to("cuda"), max_new_tokens=200)
34
  ```
35