alexkueck committed on
Commit
0c9076b
1 Parent(s): ec1c3ce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -6,7 +6,7 @@ import gradio as gr
6
  import torch
7
  from utils import *
8
  from presets import *
9
- from transformers import GPT2Tokenizer, AutoModelForCausalLM
10
  from huggingface_hub import login
11
 
12
  #antwort=""
@@ -25,7 +25,7 @@ from huggingface_hub import login
25
  #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = True
26
  #base_model = "TheBloke/vicuna-13B-1.1-HF" #load_8bit = ?
27
  #base_model = "lmsys/vicuna-13b-v1.3"
28
- base_model = "gpt2-xl"
29
 
30
  ####################################
31
  #Model und Tokenzier laden
@@ -35,7 +35,7 @@ base_model = "gpt2-xl"
35
  #Alternativ: Model und Tokenizer direkt laden
36
  #login(token=os.environ["HF_ACCESS_TOKEN"])
37
  tokenizer = GPT2Tokenizer.from_pretrained(base_model, use_fast = True, use_auth_token=False)
38
- model = GPT2LMHeadModel.from_pretrained('gpt2') # options: ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']
39
 
40
 
41
  ########################################################################
 
6
  import torch
7
  from utils import *
8
  from presets import *
9
+ from transformers import GPT2Tokenizer, GPT2LMHeadModel
10
  from huggingface_hub import login
11
 
12
  #antwort=""
 
25
  #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = True
26
  #base_model = "TheBloke/vicuna-13B-1.1-HF" #load_8bit = ?
27
  #base_model = "lmsys/vicuna-13b-v1.3"
28
+ base_model = "gpt2-xl" # options: ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']
29
 
30
  ####################################
31
  #Model und Tokenzier laden
 
35
  #Alternativ: Model und Tokenizer direkt laden
36
  #login(token=os.environ["HF_ACCESS_TOKEN"])
37
  tokenizer = GPT2Tokenizer.from_pretrained(base_model, use_fast = True, use_auth_token=False)
38
+ model = GPT2LMHeadModel.from_pretrained(base_model)
39
 
40
 
41
  ########################################################################