torotoki committed on
Commit
cecf105
1 Parent(s): 5217d78

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +7 -2
README.md CHANGED
@@ -18,12 +18,16 @@ This model is released under the Apache License 2.0.
18
 
19
  ## Usage
20
  Install the required libraries as follows:
21
- ```bash
22
  >>> python -m pip install numpy safetensors sentencepiece torch transformers
 
23
  ```
24
 
25
  Execute the following python code:
26
  ```python
 
 
 
27
  def completion(prompt: str, max_new_tokens: int = 128) -> str:
28
  inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
29
  generated_ids = model.generate(
@@ -33,7 +37,8 @@ def completion(prompt: str, max_new_tokens: int = 128) -> str:
33
  max_new_tokens=max_new_tokens,
34
  temperature=1,
35
  top_p=0.95,
36
- do_sample=True,)
 
37
  return tokenizer.decode(generated_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
38
 
39
  def generate_prompt(messages: list) -> str:
 
18
 
19
  ## Usage
20
  Install the required libraries as follows:
21
+ ```sh
22
  >>> python -m pip install numpy safetensors sentencepiece torch transformers
23
+
24
  ```
25
 
26
  Execute the following python code:
27
  ```python
28
+ tokenizer = AutoTokenizer.from_pretrained("pfnet/plamo-13b-instruct", trust_remote_code=True)
29
+ model = AutoModelForCausalLM.from_pretrained("pfnet/plamo-13b-instruct", trust_remote_code=True)
30
+
31
  def completion(prompt: str, max_new_tokens: int = 128) -> str:
32
  inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
33
  generated_ids = model.generate(
 
37
  max_new_tokens=max_new_tokens,
38
  temperature=1,
39
  top_p=0.95,
40
+ do_sample=True,
41
+ )
42
  return tokenizer.decode(generated_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
43
 
44
  def generate_prompt(messages: list) -> str: