# Use .env.local to change these variables, or directly change your env
# DO NOT EDIT THIS FILE WITH SENSITIVE DATA
MONGODB_URL=#your mongodb URL here
MONGODB_DB_NAME=chat-ui
COOKIE_NAME=hf-chat
# Increase depending on the model
PUBLIC_MAX_INPUT_TOKENS=1000
PUBLIC_ORIGIN=#https://hf.co
PUBLIC_MODEL_NAME=OpenAssistant/oasst-sft-6-llama-30b # public facing link
PUBLIC_MODEL_ID=OpenAssistant/oasst-sft-6-llama-30b-xor # used to link to model page
PUBLIC_DISABLE_INTRO_TILES=false
PUBLIC_USER_MESSAGE_TOKEN=<|prompter|>
PUBLIC_ASSISTANT_MESSAGE_TOKEN=<|assistant|>
PUBLIC_SEP_TOKEN=</s>
PUBLIC_PREPROMPT="Below are a series of dialogues between various people and an AI assistant. The AI tries to be helpful, polite, honest, sophisticated, emotionally aware, and humble-but-knowledgeable. The assistant is happy to help with almost anything, and will do its best to understand exactly what is needed. It also tries to avoid giving false or misleading information, and it caveats when it isn't entirely sure about the right answer. That said, the assistant is practical and really does its best, and doesn't let caution get too much in the way of being useful."
PUBLIC_GOOGLE_ANALYTICS_ID=#G-XXXXXXXX / Leave empty to disable
# Copy this into .env.local and replace "hf_<token>" with your HF token from https://huggingface.co/settings/token
# You can also change the model from OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5 to your own model
MODEL_ENDPOINTS=`[{
"endpoint": "https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
"authorization": "Bearer hf_<token>",
"weight": 1
}]`
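# Sketch only (not part of the original file): MODEL_ENDPOINTS can list several entries,
# and "weight" controls how requests are spread across them. The second endpoint below,
# a local text-generation-inference server at http://127.0.0.1:8080/generate_stream with
# no authorization, is an assumption for illustration.
# MODEL_ENDPOINTS=`[{
#   "endpoint": "https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
#   "authorization": "Bearer hf_<token>",
#   "weight": 2
# },{
#   "endpoint": "http://127.0.0.1:8080/generate_stream",
#   "weight": 1
# }]`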