LLM_MODEL_TYPE=huggingface
# LLM_MODEL_TYPE=openai
# LLM_MODEL_TYPE=hftgi
# LLM_MODEL_TYPE=ollama
# LLM_MODEL_TYPE=google
# LLM_MODEL_TYPE=vllm
HUGGINGFACE_AUTH_TOKEN=
HFTGI_SERVER_URL=
OPENAI_API_KEY=
GOOGLE_API_KEY=
# if unset, default to "gpt-3.5-turbo"
OPENAI_MODEL_NAME=
# GEMINI_MODEL_NAME=gemini-1.5-pro-latest
# OLLAMA_MODEL_NAME=orca2:7b
# OLLAMA_MODEL_NAME=mistral:7b
# OLLAMA_MODEL_NAME=gemma:7b
# OLLAMA_MODEL_NAME=llama2:7b
OLLAMA_MODEL_NAME=llama3:8b
OLLAMA_RP=1.15
HF_RP=1.15
LANGCHAIN_DEBUG=false
BATCH_SIZE=1
APPLY_CHAT_TEMPLATE_FOR_RAG=true
# cpu, mps or cuda:0 - if unset, use whatever detected
HF_EMBEDDINGS_DEVICE_TYPE=
HF_PIPELINE_DEVICE_TYPE=
# uncomment one of the below to load corresponding quantized model
# LOAD_QUANTIZED_MODEL=4bit
# LOAD_QUANTIZED_MODEL=8bit
QA_WITH_RAG=true
# QA_WITH_RAG=false
RETRIEVER_TYPE=questions_file
# RETRIEVER_TYPE=vectorstore
QUESTIONS_FILE_PATH="./data/datasets/ms_macro.json"
DISABLE_MODEL_PRELOADING=true
CHAT_HISTORY_ENABLED=false
SHOW_PARAM_SETTINGS=false
SHARE_GRADIO_APP=false
# if unset, default to "hkunlp/instructor-xl"
HF_EMBEDDINGS_MODEL_NAME="hkunlp/instructor-large"
# number of cpu cores - used to set n_threads for GPT4ALL & LlamaCpp models
NUMBER_OF_CPU_CORES=
USING_TORCH_BFLOAT16=true
# HUGGINGFACE_MODEL_NAME_OR_PATH="databricks/dolly-v2-3b"
# HUGGINGFACE_MODEL_NAME_OR_PATH="databricks/dolly-v2-7b"
# HUGGINGFACE_MODEL_NAME_OR_PATH="databricks/dolly-v2-12b"
# HUGGINGFACE_MODEL_NAME_OR_PATH="TheBloke/wizardLM-7B-HF"
# HUGGINGFACE_MODEL_NAME_OR_PATH="TheBloke/vicuna-7B-1.1-HF"
# HUGGINGFACE_MODEL_NAME_OR_PATH="nomic-ai/gpt4all-j"
# HUGGINGFACE_MODEL_NAME_OR_PATH="nomic-ai/gpt4all-falcon"
# HUGGINGFACE_MODEL_NAME_OR_PATH="lmsys/fastchat-t5-3b-v1.0"
# HUGGINGFACE_MODEL_NAME_OR_PATH="meta-llama/Llama-2-7b-chat-hf"
# HUGGINGFACE_MODEL_NAME_OR_PATH="meta-llama/Llama-2-13b-chat-hf"
# HUGGINGFACE_MODEL_NAME_OR_PATH="meta-llama/Llama-2-70b-chat-hf"
# HUGGINGFACE_MODEL_NAME_OR_PATH="meta-llama/Meta-Llama-3-8B-Instruct"
# HUGGINGFACE_MODEL_NAME_OR_PATH="meta-llama/Meta-Llama-3-70B-Instruct"
# HUGGINGFACE_MODEL_NAME_OR_PATH="microsoft/Orca-2-7b"
# HUGGINGFACE_MODEL_NAME_OR_PATH="microsoft/Orca-2-13b"
HUGGINGFACE_MODEL_NAME_OR_PATH="google/gemma-1.1-2b-it"
# HUGGINGFACE_MODEL_NAME_OR_PATH="google/gemma-1.1-7b-it"
# HUGGINGFACE_MODEL_NAME_OR_PATH="microsoft/Phi-3-mini-128k-instruct"
# HUGGINGFACE_MODEL_NAME_OR_PATH="mistralai/Mistral-7B-Instruct-v0.2"