Update utils.py
Browse files
utils.py
CHANGED
@@ -19,7 +19,7 @@ import operator
|
|
19 |
from typing import Annotated, Sequence, TypedDict
|
20 |
import pprint
|
21 |
|
22 |
-
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer, AutoModelForCausalLM
|
23 |
from sentence_transformers import SentenceTransformer, util
|
24 |
from typing import List, Dict
|
25 |
|
@@ -151,11 +151,16 @@ EMBEDDING_MODELL = "sentence-transformers/all-mpnet-base-v2"
|
|
151 |
|
152 |
#Modell und Tokenizer, um die Summary über die relevanten Texte zu machen
|
153 |
#mögliche Modelle: "HuggingFaceH4/zephyr-7b-alpha" #"t5-small" #"meta-llama/Meta-Llama-3-8B-Instruct" #"mistralai/Mistral-7B-Instruct-v0.3" #"microsoft/Phi-3-mini-4k-instruct" #"HuggingFaceH4/zephyr-7b-alpha"
|
|
|
|
|
|
|
|
|
|
|
154 |
HF_MODELL = "microsoft/Phi-3-mini-4k-instruct"
|
155 |
# Laden des Modells und Tokenizers
|
156 |
modell_rag = AutoModelForCausalLM.from_pretrained(HF_MODELL)
|
157 |
tokenizer_rag = AutoTokenizer.from_pretrained(HF_MODELL)
|
158 |
-
|
159 |
HF_MODELL = "t5-small"
|
160 |
modell_rag = AutoModelForSeq2SeqLM.from_pretrained(HF_MODELL)
|
161 |
tokenizer_rag = AutoTokenizer.from_pretrained(HF_MODELL)
|
|
|
19 |
from typing import Annotated, Sequence, TypedDict
|
20 |
import pprint
|
21 |
|
22 |
+
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer, AutoModelForCausalLM, GPTNeoForCausalLM, GPT2Tokenizer
|
23 |
from sentence_transformers import SentenceTransformer, util
|
24 |
from typing import List, Dict
|
25 |
|
|
|
151 |
|
152 |
#Modell und Tokenizer, um die Summary über die relevanten Texte zu machen
|
153 |
#mögliche Modelle: "HuggingFaceH4/zephyr-7b-alpha" #"t5-small" #"meta-llama/Meta-Llama-3-8B-Instruct" #"mistralai/Mistral-7B-Instruct-v0.3" #"microsoft/Phi-3-mini-4k-instruct"
|
154 |
+
HF_MODELL = "EleutherAI/gpt-neo-2.7B"
|
155 |
+
modell_rag = GPTNeoForCausalLM.from_pretrained(HF_MODELL)
|
156 |
+
tokenizer_rag = GPT2Tokenizer.from_pretrained(HF_MODELL)
|
157 |
+
|
158 |
+
"""
|
159 |
HF_MODELL = "microsoft/Phi-3-mini-4k-instruct"
|
160 |
# Laden des Modells und Tokenizers
|
161 |
modell_rag = AutoModelForCausalLM.from_pretrained(HF_MODELL)
|
162 |
tokenizer_rag = AutoTokenizer.from_pretrained(HF_MODELL)
|
163 |
+
|
164 |
HF_MODELL = "t5-small"
|
165 |
modell_rag = AutoModelForSeq2SeqLM.from_pretrained(HF_MODELL)
|
166 |
tokenizer_rag = AutoTokenizer.from_pretrained(HF_MODELL)
|