evanperez committed
Commit
7ff74b6
1 Parent(s): 6791a04

Update app.py

Files changed (1)
  app.py +8 -8
app.py CHANGED
@@ -19,13 +19,12 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig,
 ###############
 os.system("pip install -r requirements.txt")
 
-#some model
-#tokenizer = AutoTokenizer.from_pretrained("bofenghuang/vigogne-2-7b-chat")
-model_name_or_path = "bofenghuang/vigogne-2-7b-chat"
-revision = "v2.0"
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
 
-tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, revision=revision, padding_side="right", use_fast=False)
-model = AutoModelForCausalLM.from_pretrained(model_name_or_path, revision=revision)
+model = AutoModelForCausalLM.from_pretrained("Writer/palmyra-small")
+
+tokenizer = AutoTokenizer.from_pretrained("Writer/palmyra-small")
 
 
 st.set_page_config(page_title="Gemini RAG", layout="wide")
@@ -142,8 +141,8 @@ def user_input(user_question, api_key):
     chain = get_conversational_chain()
     response_gemini = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)
 
-    # Initialize the Hugging Face conversational pipeline with your custom model
-    pipeline = ConversationalPipeline(model_name_or_path="bofenghuang/vigogne-2-7b-chat")
+    # Initialize the Hugging Face text generation pipeline with your custom model
+    pipeline = TextGeneratorPipeline(model="Writer/palmyra-small")
 
     # Prompt template for making the response more conversational
     prompt_template = f"""
@@ -166,6 +165,7 @@ def user_input(user_question, api_key):
 
 
 
+
 def main():
     st.header("RAG based LLM Application")
 