kaikaidai committed
Commit fb9ce1d · verified · 1 Parent(s): 1723e63

Added HF auth token to AutoTokenizer

Files changed (1): gen_api_answer.py (+3 -3)
gen_api_answer.py CHANGED
@@ -88,7 +88,7 @@ def get_prometheus_response(model_name, prompt, system_prompt=None, max_tokens=5
 
     # Apply chat template
     model_id = "prometheus-eval/prometheus-7b-v2.0"
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_api_key)
     formatted_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
 
     payload = {
@@ -125,8 +125,8 @@ def get_atla_response(model_name, prompt, system_prompt=None, max_tokens=500, te
     messages.append({"role": "user", "content": prompt})
 
     # Apply chat template
-    model_id = "meta-llama/Llama-3.1-8B"
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    model_id = "AtlaAI/Atla-8B-preview"
+    tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_api_key)
     formatted_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
 
     payload = {
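
For reference, a minimal sketch of the pattern this commit relies on: reading a Hugging Face access token and passing it to AutoTokenizer.from_pretrained so that gated or private model repos can be downloaded. The environment variable name HF_API_KEY and the example messages are assumptions for illustration, not taken from this repository.

import os
from transformers import AutoTokenizer

# Hypothetical: the token is assumed to come from an environment variable.
hf_api_key = os.environ.get("HF_API_KEY")

# Passing token= authenticates the download, which is needed for
# gated or private repositories on the Hugging Face Hub.
model_id = "AtlaAI/Atla-8B-preview"
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_api_key)

# Render the conversation with the model's chat template, as in the diff above.
messages = [{"role": "user", "content": "Hello"}]
formatted_prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)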