yamete4 committed
Commit
3908efa
1 Parent(s): aff0041

Update README.md

Files changed (1)
  1. README.md +32 -1
README.md CHANGED
@@ -1,6 +1,9 @@
  ---
  library_name: peft
  base_model: shpotes/codegen-350M-mono
+ datasets:
+ - flytech/python-codes-25k
+ pipeline_tag: text-generation
  ---

  # Model Card for Model ID
@@ -69,7 +72,35 @@ Users (both direct and downstream) should be made aware of the risks, biases and

  ## How to Get Started with the Model

- Use the code below to get started with the model.
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained("shpotes/codegen-350M-mono")
+ model = AutoModelForCausalLM.from_pretrained("shpotes/codegen-350M-mono", trust_remote_code=True)
+
+ # CodeGen's tokenizer has no pad token by default; reuse the EOS token for padding.
+ tokenizer.pad_token = tokenizer.eos_token
+
+ # Example prompt and sampling settings; adjust to taste.
+ context = "def fibonacci(n):"
+ num_return_sequences = 1
+ temp = 0.2
+ top_p = 0.95
+ max_length_sample = 128
+
+ input_ids = tokenizer(
+     context,
+     truncation=True,
+     padding=True,
+     return_tensors='pt',
+ ).input_ids
+
+ input_ids_len = input_ids.shape[1]
+
+ with torch.no_grad():
+     tokens = model.generate(
+         input_ids,
+         do_sample=True,
+         num_return_sequences=num_return_sequences,
+         temperature=temp,
+         max_length=input_ids_len + max_length_sample,
+         top_p=top_p,
+         use_cache=True,
+         pad_token_id=tokenizer.eos_token_id,
+     )
+
+ # Decode only the newly generated tokens (everything after the prompt).
+ text = tokenizer.batch_decode(tokens[:, input_ids_len:, ...])

  [More Information Needed]
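
Since the card's metadata declares library_name: peft with shpotes/codegen-350M-mono as the base model, the snippet above only exercises the base checkpoint. A minimal sketch of also attaching the PEFT adapter could look like the following; the adapter repo id is a placeholder, not a name taken from this commit, and the prompt and sampling values are illustrative.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

adapter_id = "<this-adapter-repo-id>"  # placeholder: the actual repository id of this adapter

tokenizer = AutoTokenizer.from_pretrained("shpotes/codegen-350M-mono")
base_model = AutoModelForCausalLM.from_pretrained("shpotes/codegen-350M-mono")

# Attach the PEFT adapter weights on top of the frozen base model.
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()

prompt = "def hello_world():"  # example prompt
inputs = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
    output = model.generate(
        **inputs,
        max_new_tokens=64,
        do_sample=True,
        top_p=0.95,
        pad_token_id=tokenizer.eos_token_id,
    )
print(tokenizer.decode(output[0], skip_special_tokens=True))

This follows the usual peft loading pattern; the exact adapter id depends on where these adapter weights are hosted.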