Ankitajadhav committed on
Commit ae3604c
Parent: 7aafc6d

Update app.py

Files changed (1)
  1. app.py +5 -6
app.py CHANGED
@@ -9,8 +9,8 @@ import chromadb
 from datasets import load_dataset
 # from transformers import AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
-from gpt4all import GPT4All
-from pathlib import Path
+from transformers import AutoTokenizer, MistralForCausalLM
+
 
 
 # Function to clear the cache
@@ -86,10 +86,9 @@ vector_store.populate_vectors(dataset=None)
 # load model orca-mini general purpose model
 # tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
 # model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
-model_name = 'Meta-Llama-3-8B-Instruct.Q4_0.gguf'  # .gguf indicates a quantized model
-model_path = "gpt4all"
-# path where the model is downloaded locally; download once, then reload for subsequent inference
-model = GPT4All(model_name=model_name, model_path=model_path, device="cuda")
+
+model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
+tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
 
 # Define the chatbot response function
 def chatbot_response(user_input):
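The commit swaps a 4-bit quantized GPT4All checkpoint (Meta-Llama-3-8B-Instruct.Q4_0.gguf) for a full-precision transformers load, which raises memory use substantially: a 7B model in fp32 needs roughly 28 GB, versus about 5 GB for a Q4_0 .gguf. Below is a minimal sketch of a lighter-weight load, assuming a CUDA device and the accelerate package are available; the torch_dtype and device_map arguments are standard from_pretrained options, not part of this commit:

import torch
from transformers import AutoTokenizer, MistralForCausalLM

# Assumed alternative to the commit's default-precision load:
# half precision halves memory vs. fp32, and device_map="auto"
# (which requires `pip install accelerate`) places weights on the GPU.
model = MistralForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1",
    torch_dtype=torch.float16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")

Note also that the new checkpoint is the base Mistral-7B-v0.1, not the Instruct variant referenced in the commented-out lines, so it is not tuned for chat-style prompts.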
 
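The diff ends at the chatbot_response signature, so the function body is not shown. A hedged sketch of how the newly loaded model and tokenizer might be wired into it follows; max_new_tokens and the decoding options are illustrative assumptions, not code from the commit:

# Hypothetical body for chatbot_response; the real implementation is not in this diff.
def chatbot_response(user_input):
    # Tokenize the prompt into PyTorch tensors and move them to the model's device
    inputs = tokenizer(user_input, return_tensors="pt").to(model.device)
    # Greedy generation of up to 256 new tokens (assumed limit)
    outputs = model.generate(**inputs, max_new_tokens=256)
    # Decode token ids back into text, dropping special tokens
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

Since outputs[0] contains the prompt tokens as well, a real implementation would likely also trim the echoed prompt from the decoded text; and without a GPU or reduced precision, generate on a 7B model will be slow.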