Update app.py
app.py CHANGED
@@ -19,13 +19,12 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig,
 ###############
 os.system("pip install -r requirements.txt")
 
-
-
-model_name_or_path = "bofenghuang/vigogne-2-7b-chat"
-revision = "v2.0"
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
 
-
-
+model = AutoModelForCausalLM.from_pretrained("Writer/palmyra-small")
+
+tokenizer = AutoTokenizer.from_pretrained("Writer/palmyra-small")
 
 
 st.set_page_config(page_title="Gemini RAG", layout="wide")
@@ -142,8 +141,8 @@ def user_input(user_question, api_key):
 chain = get_conversational_chain()
 response_gemini = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)
 
-# Initialize the Hugging Face
-pipeline =
+# Initialize the Hugging Face text generation pipeline with your custom model
+pipeline = TextGeneratorPipeline(model="Writer/palmyra-small")
 
 # Prompt template for making the response more conversational
 prompt_template = f"""
@@ -166,6 +165,7 @@ def user_input(user_question, api_key):
 
 
 
+
 def main():
 st.header("RAG based LLM Application")
 
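The TextGeneratorPipeline(...) call added in the second hunk does not match the transformers API: the library's text-generation class is TextGenerationPipeline, normally built through the pipeline() factory, and a variable named pipeline would shadow that factory if it were imported. Below is a minimal sketch of the same initialization using transformers.pipeline with the Writer/palmyra-small checkpoint loaded in the first hunk; the prompt string and max_new_tokens value are illustrative placeholders, not values taken from app.py.

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Reuse the checkpoint introduced by this commit.
model = AutoModelForCausalLM.from_pretrained("Writer/palmyra-small")
tokenizer = AutoTokenizer.from_pretrained("Writer/palmyra-small")

# Build a text-generation pipeline; named text_generator so it does not
# shadow the imported pipeline() factory function.
text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Illustrative call: in app.py the prompt would be the prompt_template
# assembled inside user_input().
result = text_generator(
    "Rewrite this answer in a conversational tone: ...",
    max_new_tokens=128,
)
print(result[0]["generated_text"])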