codefuse-admin committed on
Commit
0074412
1 Parent(s): 8d41a21

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -30,8 +30,8 @@ CodeFuse-13B is a 13 billion parameter code generation model trained on the GPT-
30
  import torch
31
  from transformers import AutoModelForCausalLM, AutoTokenizer
32
 
33
- tokenizer = AutoTokenizer.from_pretrained(("CodeFuse-13B-evol-instruct-4k"))
34
- model = AutoModelForCausalLM.from_pretrained(("CodeFuse-13B-evol-instruct-4k"), device_map="auto").half().eval()
35
 
36
  input_ids = tokenizer.encode("# language: Python\ndef quick_sort(array):\n", return_tensors="pt").to("cuda")
37
  output_ids = model.generate(input_ids, max_new_tokens=200)
@@ -70,8 +70,8 @@ CodeFuse-13B是基于GPT-NeoX框架训练的13B参数代码生成模型,能够
70
  import torch
71
  from transformers import AutoModelForCausalLM, AutoTokenizer
72
 
73
- tokenizer = AutoTokenizer.from_pretrained(("CodeFuse-13B-evol-instruct-4k"))
74
- model = AutoModelForCausalLM.from_pretrained(("CodeFuse-13B-evol-instruct-4k"), device_map="auto").half().eval()
75
 
76
  input_ids = tokenizer.encode("# language: Python\ndef quick_sort(array):\n", return_tensors="pt").to("cuda")
77
  output_ids = model.generate(input_ids, max_new_tokens=200)
 
30
  import torch
31
  from transformers import AutoModelForCausalLM, AutoTokenizer
32
 
33
+ tokenizer = AutoTokenizer.from_pretrained("CodeFuse-13B")
34
+ model = AutoModelForCausalLM.from_pretrained("CodeFuse-13B", device_map="auto").half().eval()
35
 
36
  input_ids = tokenizer.encode("# language: Python\ndef quick_sort(array):\n", return_tensors="pt").to("cuda")
37
  output_ids = model.generate(input_ids, max_new_tokens=200)
 
70
  import torch
71
  from transformers import AutoModelForCausalLM, AutoTokenizer
72
 
73
+ tokenizer = AutoTokenizer.from_pretrained("CodeFuse-13B")
74
+ model = AutoModelForCausalLM.from_pretrained("CodeFuse-13B", device_map="auto").half().eval()
75
 
76
  input_ids = tokenizer.encode("# language: Python\ndef quick_sort(array):\n", return_tensors="pt").to("cuda")
77
  output_ids = model.generate(input_ids, max_new_tokens=200)