rphrp1985 committed on
Commit
b43bcfd
1 Parent(s): 54d138e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  import spaces
3
  from huggingface_hub import InferenceClient
4
-
5
  """
6
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
7
  """
@@ -9,10 +9,11 @@ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
9
  # pip install 'git+https://github.com/huggingface/transformers.git'
10
 
11
  from transformers import AutoTokenizer, AutoModelForCausalLM
12
-
 
13
  model_id = "CohereForAI/c4ai-command-r-plus"
14
- tokenizer = AutoTokenizer.from_pretrained(model_id)
15
- model = AutoModelForCausalLM.from_pretrained(model_id)
16
 
17
  # Format message with the command-r-plus chat template
18
  messages = [{"role": "user", "content": "Hello, how are you?"}]
 
1
  import gradio as gr
2
  import spaces
3
  from huggingface_hub import InferenceClient
4
+ import os
5
  """
6
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
7
  """
 
9
  # pip install 'git+https://github.com/huggingface/transformers.git'
10
 
11
  from transformers import AutoTokenizer, AutoModelForCausalLM
12
+ token=os.getenv('token')
13
+ print('token = ',token)
14
  model_id = "CohereForAI/c4ai-command-r-plus"
15
+ tokenizer = AutoTokenizer.from_pretrained(model_id, token= token)
16
+ model = AutoModelForCausalLM.from_pretrained(model_id, token= token)
17
 
18
  # Format message with the command-r-plus chat template
19
  messages = [{"role": "user", "content": "Hello, how are you?"}]