minhdang committed on
Commit 3ea359d
1 Parent(s): dc0acc6

update check

Files changed (1)
app.py +2 -0
app.py CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
 import transformers
 from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 import torch
+import os
 
 title = """
 # Welcome to 🌟Tonic's🫡Command-R
@@ -18,6 +19,7 @@ model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_c
 
 @spaces.GPU
 def generate_response(user_input, max_new_tokens, temperature):
+    os.system("nvidia-smi")
     messages = [{"role": "user", "content": user_input}]
     input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
     input_ids = input_ids.to(model.device)
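
The two added lines make generate_response shell out to nvidia-smi on every call, so the Space's logs show whether a GPU is actually attached while the @spaces.GPU-decorated function runs. A minimal sketch of a similar check that captures the output instead of only printing it; the log_gpu_status helper and the specific nvidia-smi query flags are illustrative assumptions, not part of this commit:

import subprocess

import torch


def log_gpu_status() -> str:
    """Return a short GPU status string, falling back gracefully if nvidia-smi is unavailable."""
    if not torch.cuda.is_available():
        return "CUDA not available"
    try:
        # Capture nvidia-smi output rather than letting it print straight to stdout.
        result = subprocess.run(
            ["nvidia-smi", "--query-gpu=name,memory.used,memory.total", "--format=csv,noheader"],
            capture_output=True, text=True, check=True,
        )
        return result.stdout.strip()
    except (FileNotFoundError, subprocess.CalledProcessError):
        return f"torch reports {torch.cuda.device_count()} CUDA device(s), but nvidia-smi failed"

Returning the status string (or logging it) keeps the check visible even when stdout is not surfaced, while the os.system call in the commit simply prints the full nvidia-smi table to the container logs.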