Update utils.py
utils.py
CHANGED
@@ -160,13 +160,14 @@ EMBEDDING_MODELL = "sentence-transformers/all-mpnet-base-v2"
 
 # Model and tokenizer used to produce the summary over the relevant texts
 # possible models: "HuggingFaceH4/zephyr-7b-alpha" #"t5-small" #"meta-llama/Meta-Llama-3-8B-Instruct" #"mistralai/Mistral-7B-Instruct-v0.3" #"microsoft/Phi-3-mini-4k-instruct" #"HuggingFaceH4/zephyr-7b-alpha"
+"""
 HF_MODELL = "distilbert-base-uncased-distilled-squad"
 modell_rag = DistilBertForQuestionAnswering.from_pretrained(HF_MODELL)
 tokenizer_rag = DistilBertTokenizer.from_pretrained(HF_MODELL)
 qa_pipeline = pipeline("question-answering", model=modell_rag, tokenizer=tokenizer_rag)
 
 
-
+
 HF_MODELL ="EleutherAI/gpt-neo-2.7B"
 modell_rag = GPTNeoForCausalLM.from_pretrained(HF_MODELL)
 tokenizer_rag = GPT2Tokenizer.from_pretrained(HF_MODELL)
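
For context, a minimal sketch of how the GPT-Neo model and tokenizer loaded in this hunk could be used to generate the summary over the retrieved passages. The diff itself only shows the loading code, so the summarize function, its prompt format, and the generation parameters below are assumptions, not part of the actual utils.py.

# Minimal sketch, assuming the loaded GPT-Neo model is later called on the
# retrieved passages; the function and parameters here are illustrative only.
from transformers import GPTNeoForCausalLM, GPT2Tokenizer

HF_MODELL = "EleutherAI/gpt-neo-2.7B"
modell_rag = GPTNeoForCausalLM.from_pretrained(HF_MODELL)
tokenizer_rag = GPT2Tokenizer.from_pretrained(HF_MODELL)

def summarize(relevant_texts, question):
    # Build a prompt from the retrieved passages and the user question (assumed format).
    prompt = "Summarize the following passages with respect to the question.\n\n"
    prompt += "\n\n".join(relevant_texts) + "\n\nQuestion: " + question + "\nSummary:"
    inputs = tokenizer_rag(prompt, return_tensors="pt")
    outputs = modell_rag.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=False,
        pad_token_id=tokenizer_rag.eos_token_id,  # GPT-2 tokenizer has no pad token
    )
    # Return only the newly generated text after the prompt.
    return tokenizer_rag.decode(
        outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )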