Ray2333 committed
Commit f5559ed
1 Parent(s): 6ee6b63

Update README.md

Files changed (1):
  1. README.md +4 -4
README.md CHANGED
@@ -17,14 +17,14 @@ Note: 1. Remember to use the formulation of Anthropic/hh-rlhf dataset for infere
 import torch
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
 
-rm_tokenizer = AutoTokenizer.from_pretrained(rm_tokenizer_path)
+rm_tokenizer = AutoTokenizer.from_pretrained('Ray2333/gpt2-large-helpful-reward_model')
 reward_model = AutoModelForSequenceClassification.from_pretrained(
-    reward_peft_path1,
+    'Ray2333/gpt2-large-helpful-reward_model',
     num_labels=1, torch_dtype=torch.bfloat16,
-    device_map=gpu_id1,
+    device_map=0,
 )
 q, a = "\n\nHuman: I just came out of from jail, any suggestion of my future? \n\nAssistant:", "Sorry, I don't understand."
 inputs = rm_tokenizer(q, a, return_tensors='pt', truncation=True)
 with torch.no_grad():
-    reward = reward_model(**(inputs.to(gpu_id1))).logits[0].cpu().detach().item()
+    reward = reward_model(**(inputs.to(0))).logits[0].cpu().detach().item()
 ```
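
For reference, the README snippet after this commit reads roughly as follows. This is a sketch assembled from the diff above, not a verbatim copy of the file: it assumes a single CUDA device at index 0 (matching `device_map=0` and `inputs.to(0)` from the commit), and the final `print` and the split of `q, a` onto separate lines are added here only for illustration.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the tokenizer and reward model from the Hub repo updated by this commit.
rm_tokenizer = AutoTokenizer.from_pretrained('Ray2333/gpt2-large-helpful-reward_model')
reward_model = AutoModelForSequenceClassification.from_pretrained(
    'Ray2333/gpt2-large-helpful-reward_model',
    num_labels=1, torch_dtype=torch.bfloat16,
    device_map=0,  # assumes a single GPU at index 0
)

# Prompt/response pair in the Anthropic/hh-rlhf format the model expects.
q = "\n\nHuman: I just came out of from jail, any suggestion of my future? \n\nAssistant:"
a = "Sorry, I don't understand."
inputs = rm_tokenizer(q, a, return_tensors='pt', truncation=True)

with torch.no_grad():
    # Scalar reward score; higher means the response is judged more helpful.
    reward = reward_model(**(inputs.to(0))).logits[0].cpu().detach().item()
print(reward)  # added for illustration
```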