srinidhidevaraj committed on
Commit
2747791
1 Parent(s): 604b05d

Update app.py

Files changed (1)
  1. app.py +8 -3
app.py CHANGED
@@ -10,6 +10,8 @@ import torch
 from accelerate import init_empty_weights
 # Load environment variables
 from transformers import AutoProcessor, AutoModelForPreTraining
+from transformers import MllamaForConditionalGeneration, AutoProcessor
+
 # Configure Gemini API
 # genai.configure(api_key=os.getenv("gkey2"))
 
@@ -25,7 +27,7 @@ prompt="<|image|><|begin_of_text|>You are a helpful assistant. Please respond to
 
 # Initialize the Llama model
 # model = Ollama(model="llama3.2")
-model_id = "meta-llama/Llama-3.2-11B-Vision"
+
 
 # model = MllamaForConditionalGeneration.from_pretrained(
 # model_id,
@@ -33,8 +35,11 @@ model_id = "meta-llama/Llama-3.2-11B-Vision"
 # device_map="auto",
 # )
 # processor = AutoProcessor.from_pretrained(model_id)
-processor = AutoProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision")
-model = AutoModelForPreTraining.from_pretrained("meta-llama/Llama-3.2-11B-Vision")
+
+
+model_id = "meta-llama/Llama-3.2-11B-Vision"
+model = MllamaForConditionalGeneration.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16)
+processor = AutoProcessor.from_pretrained(model_id)
 
 # Define function to get response from the model
 def get_gemin_response(input_text, img):
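
Note: the lines added in this commit switch model loading from AutoModelForPreTraining to MllamaForConditionalGeneration. As a rough sketch of how the resulting model and processor are typically used together for image-plus-text generation (not part of this commit): the local image path, placeholder prompt, and generation parameters below are illustrative assumptions, and the real prompt string is the one defined earlier in app.py.

# Minimal usage sketch, assuming PIL is installed and "example.jpg" exists locally
import torch
from PIL import Image
from transformers import MllamaForConditionalGeneration, AutoProcessor

model_id = "meta-llama/Llama-3.2-11B-Vision"
model = MllamaForConditionalGeneration.from_pretrained(
    model_id, device_map="auto", torch_dtype=torch.bfloat16
)
processor = AutoProcessor.from_pretrained(model_id)

# Illustrative placeholder; app.py defines its own prompt string
prompt = "<|image|><|begin_of_text|>Describe this image."
image = Image.open("example.jpg")

# Preprocess the image and prompt together, then generate and decode a response
inputs = processor(image, prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=100)
print(processor.decode(output[0], skip_special_tokens=True))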