Hunzla committed
Commit 2e669c6
1 Parent(s): fb37126

Update app.py

Files changed (1)
  1. app.py +1 -17
app.py CHANGED
@@ -2,30 +2,16 @@
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 import torch
-from huggingface_hub import HfApi, HfFolder
-
-# Replace '<your_api_token>' with your actual API token
-api_token = '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'
-
-# Initialize the HfApi with the API token
-api = HfApi(token=api_token)
-print('loggedin')
-# Verify that you're logged in
-user = api.whoami()
-print(user)
 # Load model and tokenizer
 model_name = "meta-llama/Llama-2-7b-chat-hf"
 print("started loading model")
 
-api_token = "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"  # Replace with your actual API token
-
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
     low_cpu_mem_usage=True,
     return_dict=True,
 
-    revision="main",  # Or the desired revision
-    token=api_token
+    revision="main",
 )
 # return_dict=True,
 # torch_dtype=torch.float16,
@@ -33,8 +19,6 @@ print("loaded model")
 
 tokenizer = AutoTokenizer.from_pretrained(
     model_name,
-    revision="main",
-    token=api_token
     # Or the desired revision
 )
 
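
The net effect of the commit is that the hardcoded access token and the HfApi login block are removed, leaving the model load with only revision="main". Since meta-llama/Llama-2-7b-chat-hf is a gated repository, authentication still has to come from somewhere at runtime, typically a token stored as a Space secret and exposed as an environment variable. The sketch below shows one way the resulting app.py could read such a token instead of hardcoding it; the HF_TOKEN variable name and the pipeline wiring are assumptions, not part of this commit.

import os

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name = "meta-llama/Llama-2-7b-chat-hf"

# Assumption: the Space exposes its access token as an HF_TOKEN environment
# variable (e.g. a Space secret) rather than hardcoding it in app.py.
hf_token = os.environ.get("HF_TOKEN")  # None falls back to any cached login

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    low_cpu_mem_usage=True,
    return_dict=True,
    revision="main",
    token=hf_token,
)

tokenizer = AutoTokenizer.from_pretrained(
    model_name,
    revision="main",
    token=hf_token,
)

# The original app.py imports pipeline; a text-generation pipeline is the
# usual way to serve a chat model like this one behind a Gradio interface.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

Without a valid token, loading the gated Llama-2 weights fails with an authorization error, so dropping the hardcoded value only works if the Space supplies the token through its environment or a prior login.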