gokaygokay committed on
Commit
225e7ea
1 Parent(s): 2fb3511

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +8 -4
README.md CHANGED
@@ -1,16 +1,19 @@
1
  ---
2
- language:
3
- - en
4
  library_name: transformers
5
  tags:
6
  - art
7
  datasets:
8
  - gokaygokay/prompt_description_stable_diffusion_3k
 
 
9
  pipeline_tag: text2text-generation
10
  ---
11
 
12
  ```
13
- from transformers import AutoModelForCausalLM , GenerationConfig
 
 
 
14
  import torch
15
  import os
16
 
@@ -18,6 +21,8 @@ model_id = "gokaygokay/tiny_llama_chat_description_to_prompt"
18
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, load_in_8bit=False,
19
  device_map="auto",
20
  trust_remote_code=True)
 
 
21
 
22
  def generate_response(user_input):
23
 
@@ -28,7 +33,6 @@ def generate_response(user_input):
28
  top_k=5,temperature=0.9,repetition_penalty=1.2,
29
  max_new_tokens=100,pad_token_id=tokenizer.eos_token_id
30
  )
31
- start_time = perf_counter()
32
 
33
  inputs = tokenizer(prompt, return_tensors="pt").to('cuda')
34
 
 
1
  ---
 
 
2
  library_name: transformers
3
  tags:
4
  - art
5
  datasets:
6
  - gokaygokay/prompt_description_stable_diffusion_3k
7
+ language:
8
+ - en
9
  pipeline_tag: text2text-generation
10
  ---
11
 
12
  ```
13
+
14
+ !pip install -q -U transformers trl accelerate peft bitsandbytes
15
+
16
+ from transformers import AutoModelForCausalLM, GenerationConfig, AutoTokenizer
17
  import torch
18
  import os
19
 
 
21
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, load_in_8bit=False,
22
  device_map="auto",
23
  trust_remote_code=True)
24
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
25
+ tokenizer.pad_token = tokenizer.eos_token
26
 
27
  def generate_response(user_input):
28
 
 
33
  top_k=5,temperature=0.9,repetition_penalty=1.2,
34
  max_new_tokens=100,pad_token_id=tokenizer.eos_token_id
35
  )
 
36
 
37
  inputs = tokenizer(prompt, return_tensors="pt").to('cuda')
38