Spaces:
Running
Running
Sean-Case
committed on
Commit
•
1ae7b34
1
Parent(s):
9a3229c
Remove ctransformers model load (not needed yet). Used faster keyword model
Browse files- chatfuncs/chatfuncs.py +3 -2
chatfuncs/chatfuncs.py
CHANGED
@@ -83,10 +83,11 @@ ner_model = SpanMarkerModel.from_pretrained("tomaarsen/span-marker-mbert-base-mu
|
|
83 |
|
84 |
## Initialise keyword model ##
|
85 |
# Used to pull out keywords from chat history to add to user queries behind the scenes
|
86 |
-
kw_model = pipeline("feature-extraction", model="
|
87 |
|
88 |
## Chat models ##
|
89 |
-
ctrans_llm =
|
|
|
90 |
#ctrans_llm = AutoModelForCausalLM.from_pretrained('TheBloke/orca_mini_3B-GGML', model_type='llama', model_file='orca-mini-3b.ggmlv3.q8_0.bin')
|
91 |
#gpt4all_model = GPT4All(model_name= "orca-mini-3b.ggmlv3.q4_0.bin", model_path="models/") # "ggml-mpt-7b-chat.bin"
|
92 |
|
|
|
83 |
|
84 |
## Initialise keyword model ##
|
85 |
# Used to pull out keywords from chat history to add to user queries behind the scenes
|
86 |
+
kw_model = pipeline("feature-extraction", model="sentence-transformers/all-MiniLM-L6-v2")
|
87 |
|
88 |
## Chat models ##
|
89 |
+
ctrans_llm = [] # Not loaded by default
|
90 |
+
#ctrans_llm = AutoModelForCausalLM.from_pretrained('TheBloke/orca_mini_3B-GGML', model_type='llama', model_file='orca-mini-3b.ggmlv3.q4_0.bin')
|
91 |
#ctrans_llm = AutoModelForCausalLM.from_pretrained('TheBloke/orca_mini_3B-GGML', model_type='llama', model_file='orca-mini-3b.ggmlv3.q8_0.bin')
|
92 |
#gpt4all_model = GPT4All(model_name= "orca-mini-3b.ggmlv3.q4_0.bin", model_path="models/") # "ggml-mpt-7b-chat.bin"
|
93 |
|