emeses committed on
Commit aaaf098 · 1 Parent(s): 3214a9e

Update space

Files changed (1)
  1. app.py +1 -0
app.py CHANGED
@@ -4,6 +4,7 @@ model_name = "emeses/lab2_model"
  # Load the model with 8-bit quantization (works better on CPU)
  model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config={"load_in_8bit": True})
  tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = model.to("cpu")

  # Function for Gradio interface
  def generate_text(prompt):
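
The hunk above only shows the lines around the change; the rest of app.py (the imports, the body of generate_text, and the Gradio wiring) is not part of this diff. As a rough orientation, a minimal CPU-only version of such an app could look like the sketch below. Everything beyond the lines visible in the diff is an assumption for illustration, and the sketch skips the 8-bit quantization config, since bitsandbytes 8-bit loading generally expects a GPU backend and a model loaded that way cannot simply be moved to CPU with .to("cpu").

# Minimal sketch only -- the real app.py is not fully shown in this commit.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "emeses/lab2_model"

# Plain full-precision load; the commit itself passes an 8-bit quantization
# config here, which is omitted in this CPU-only sketch.
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = model.to("cpu")

# Function for Gradio interface (body is an assumption, not taken from the diff)
def generate_text(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=128)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

demo = gr.Interface(fn=generate_text, inputs="text", outputs="text")
demo.launch()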