rphrp1985 committed on
Commit
7c3e16e
1 Parent(s): 82c113d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -51,19 +51,19 @@ model_id = "CohereForAI/c4ai-command-r-plus-4bit"
51
 
52
  tokenizer = AutoTokenizer.from_pretrained(
53
  # model_id
54
- 'CohereForAI/aya-23-8B'
55
  , token= token,)
56
 
57
 
58
- # accelerator = Accelerator()
59
 
60
  model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
61
  # torch_dtype= torch.uint8,
62
- torch_dtype=torch.float32,
63
  # load_in_16bit=True,
64
  # # # torch_dtype=torch.fl,
65
- # attn_implementation="flash_attention_2",
66
- # low_cpu_mem_usage=True,
67
  # device_map='cuda',
68
  # device_map=accelerator.device_map,
69
 
@@ -71,7 +71,7 @@ model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
71
 
72
 
73
  #
74
- # model = accelerator.prepare(model)
75
 
76
 
77
  # device_map = infer_auto_device_map(model, max_memory={0: "79GB", "cpu":"65GB" })
@@ -102,7 +102,7 @@ def respond(
102
 
103
  messages= json_obj
104
 
105
- input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to('cuda') # .to(accelerator.device)
106
  input_ids2 = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True, return_tensors="pt") #.to('cuda')
107
  print(f"Converted input_ids dtype: {input_ids.dtype}")
108
  input_str= str(input_ids2)
 
51
 
52
  tokenizer = AutoTokenizer.from_pretrained(
53
  # model_id
54
+ model_id
55
  , token= token,)
56
 
57
 
58
+ accelerator = Accelerator()
59
 
60
  model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
61
  # torch_dtype= torch.uint8,
62
+ torch_dtype=torch.float16,
63
  # load_in_16bit=True,
64
  # # # torch_dtype=torch.fl,
65
+ attn_implementation="flash_attention_2",
66
+ low_cpu_mem_usage=True,
67
  # device_map='cuda',
68
  # device_map=accelerator.device_map,
69
 
 
71
 
72
 
73
  #
74
+ model = accelerator.prepare(model)
75
 
76
 
77
  # device_map = infer_auto_device_map(model, max_memory={0: "79GB", "cpu":"65GB" })
 
102
 
103
  messages= json_obj
104
 
105
+ input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(accelerator.device)
106
  input_ids2 = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True, return_tensors="pt") #.to('cuda')
107
  print(f"Converted input_ids dtype: {input_ids.dtype}")
108
  input_str= str(input_ids2)