diabolic6045 committed on
Commit
b90ecf2
·
verified ·
1 Parent(s): d7f9b3d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -6
app.py CHANGED
@@ -6,15 +6,13 @@ from huggingface_hub import login
6
  import os
7
  login(os.environ['HF_KEY'])
8
 
9
- # Load the model and configuration
10
- config = PeftConfig.from_pretrained("diabolic6045/gemma-2-2b-chess-adapter")
11
- base_model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b")
12
- model = PeftModel.from_pretrained(base_model, "diabolic6045/gemma-2-2b-chess-adapter")
13
- model.tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b")
14
-
15
  # Define a function that takes user input and returns the model's output
16
  @spaces.GPU(duration=120)
17
  def generate_text(prompt):
 
 
 
 
18
  input_ids = model.tokenizer.encode(prompt, return_tensors="pt")
19
  output = model.generate(input_ids, max_length=100)
20
  return model.tokenizer.decode(output[0], skip_special_tokens=True)
 
6
  import os
7
  # Authenticate with the Hugging Face Hub using a token kept in the
  # HF_KEY environment variable (a Space secret, presumably) so the gated
  # Gemma base model can be downloaded. Raises KeyError if HF_KEY is unset.
  login(os.environ['HF_KEY'])
8
 
 
 
 
 
 
 
9
# Define a function that takes user input and returns the model's output
@spaces.GPU(duration=120)
def generate_text(prompt):
    """Generate a chess-tuned completion for ``prompt``.

    Lazily loads the LoRA-adapted Gemma 2 2B model and its tokenizer on
    the first call — inside the ``@spaces.GPU`` context, as ZeroGPU
    Spaces require — and caches both as function attributes so later
    calls skip the expensive ``from_pretrained`` downloads.

    Args:
        prompt: Free-form text fed to the model.

    Returns:
        The decoded generation (up to 100 tokens total), with special
        tokens stripped.
    """
    if not hasattr(generate_text, "_model"):
        # NOTE(review): the original loaded a PeftConfig it never used and
        # re-loaded the whole model on every call — both fixed here.
        base_model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b")
        generate_text._model = PeftModel.from_pretrained(
            base_model, "diabolic6045/gemma-2-2b-chess-adapter"
        )
        generate_text._tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b")
    tokenizer = generate_text._tokenizer
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = generate_text._model.generate(input_ids, max_length=100)
    return tokenizer.decode(output[0], skip_special_tokens=True)