nteku1 committed
Commit 6f9572c
Parent: 8f49f6b

Push model using huggingface_hub.

Files changed (4)
  1. README.md +3 -3
  2. pytorch_model.bin +1 -1
  3. tokenizer.json +2 -16
  4. tokenizer_config.json +0 -7
README.md CHANGED
@@ -25,7 +25,7 @@ You can then generate text as follows:
  ```python
  from transformers import pipeline
 
- generator = pipeline("text-generation", model="nteku1//tmp/tmpbkqgwqeg/nteku1/final_ppomodel")
+ generator = pipeline("text-generation", model="nteku1//tmp/tmpfe6xmj5d/nteku1/final_ppomodel")
  outputs = generator("Hello, my llama is cute")
  ```
 
@@ -35,8 +35,8 @@ If you want to use the model for training or to obtain the outputs from the valu
  from transformers import AutoTokenizer
  from trl import AutoModelForCausalLMWithValueHead
 
- tokenizer = AutoTokenizer.from_pretrained("nteku1//tmp/tmpbkqgwqeg/nteku1/final_ppomodel")
- model = AutoModelForCausalLMWithValueHead.from_pretrained("nteku1//tmp/tmpbkqgwqeg/nteku1/final_ppomodel")
+ tokenizer = AutoTokenizer.from_pretrained("nteku1//tmp/tmpfe6xmj5d/nteku1/final_ppomodel")
+ model = AutoModelForCausalLMWithValueHead.from_pretrained("nteku1//tmp/tmpfe6xmj5d/nteku1/final_ppomodel")
 
  inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
  outputs = model(**inputs, labels=inputs["input_ids"])
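Note that the model identifier in the README snippets is a temporary local staging path written by the push script. A minimal sketch of loading from the Hub instead, assuming the pushed repo id is `nteku1/final_ppomodel` (inferred from the username and folder name, not confirmed by the diff):

```python
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead

# Repo id "nteku1/final_ppomodel" is an assumption; the README above
# points at a temporary /tmp staging directory instead.
tokenizer = AutoTokenizer.from_pretrained("nteku1/final_ppomodel")
model = AutoModelForCausalLMWithValueHead.from_pretrained("nteku1/final_ppomodel")

inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
outputs = model(**inputs, labels=inputs["input_ids"])
```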
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:493fd5811778d05732aa247eac9aaf072598a323dc06b46b29bce51892a8423c
+ oid sha256:c7cd8389a6e02674a56f20d180192353a7dd3e152ba6ef269dadecaffa9071eb
  size 6652
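The pytorch_model.bin entry is a Git LFS pointer, so only the SHA-256 oid changes here; the weights themselves live in LFS storage. A minimal integrity check, assuming the real file has already been downloaded locally (the filename below is just the default):

```python
import hashlib

# New pointer oid from this commit.
EXPECTED_OID = "c7cd8389a6e02674a56f20d180192353a7dd3e152ba6ef269dadecaffa9071eb"

# Hash the downloaded weights in chunks and compare against the pointer.
sha256 = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

assert sha256.hexdigest() == EXPECTED_OID, "checksum mismatch with the LFS pointer"
```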
tokenizer.json CHANGED
@@ -1,21 +1,7 @@
  {
  "version": "1.0",
- "truncation": {
-   "direction": "Right",
-   "max_length": 512,
-   "strategy": "LongestFirst",
-   "stride": 0
- },
- "padding": {
-   "strategy": {
-     "Fixed": 512
-   },
-   "direction": "Right",
-   "pad_to_multiple_of": null,
-   "pad_id": 50256,
-   "pad_type_id": 0,
-   "pad_token": "<|endoftext|>"
- },
+ "truncation": null,
+ "padding": null,
  "added_tokens": [
  {
  "id": 50256,
tokenizer_config.json CHANGED
@@ -16,15 +16,8 @@
  "": 0
  },
  "eos_token": "<|endoftext|>",
- "max_length": 512,
  "model_max_length": 1024,
- "pad_to_multiple_of": null,
  "pad_token": "<|endoftext|>",
- "pad_token_type_id": 0,
- "padding_side": "right",
- "stride": 0,
  "tokenizer_class": "GPT2Tokenizer",
- "truncation_side": "right",
- "truncation_strategy": "longest_first",
  "unk_token": "<|endoftext|>"
  }
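The removed keys (max_length, padding_side, truncation_side, and so on) fall back to the GPT2Tokenizer defaults; only the special tokens and model_max_length remain pinned in the config. A short sketch of overriding a default after loading, with the same assumed repo id:

```python
from transformers import AutoTokenizer

# Repo id assumed; the config still sets model_max_length=1024 and pad_token.
tokenizer = AutoTokenizer.from_pretrained("nteku1/final_ppomodel")

tokenizer.padding_side = "left"      # e.g. left-padding for batched generation
print(tokenizer.model_max_length)    # 1024, from tokenizer_config.json
```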