# ------------- IMAGE API CONFIG --------------
# Supported values:
# - VIDEOCHAIN
RENDERING_ENGINE="VIDEOCHAIN"
VIDEOCHAIN_API_URL="http://localhost:7860"
VIDEOCHAIN_API_TOKEN=
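# Illustrative value only: VIDEOCHAIN_API_TOKEN="your-secret-token"
# (assumption: a locally hosted VideoChain server may not require a token, in which case this can stay empty)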
# Not supported yet
REPLICATE_TOKEN=
# ------------- LLM API CONFIG ----------------
# Supported values:
# - INFERENCE_ENDPOINT
# - INFERENCE_API
LLM_ENGINE="INFERENCE_ENDPOINT"
# Hugging Face token (if you choose to use a custom Inference Endpoint or an Inference API model)
HF_API_TOKEN=
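# You can create a token at https://huggingface.co/settings/tokens
# Illustrative format only: HF_API_TOKEN="hf_xxxxxxxxxxxxxxxxxxxx"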
# URL of a custom text-generation Inference Endpoint of your choice
# -> Leave it empty if you decide to use an Inference API model instead
HF_INFERENCE_ENDPOINT_URL=
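# Illustrative format only (not a real endpoint):
# HF_INFERENCE_ENDPOINT_URL="https://xxxxxxxxxx.us-east-1.aws.endpoints.huggingface.cloud"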
# You can also use a model from the Inference API (instead of a custom Inference Endpoint)
# -> Leave it empty if you decide to use an Inference Endpoint URL instead
HF_INFERENCE_API_MODEL="codellama/CodeLlama-7b-hf"
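# Illustrative alternative (assumption: other text-generation models served by the Inference API should also work), e.g.:
# HF_INFERENCE_API_MODEL="mistralai/Mistral-7B-Instruct-v0.1"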
# Not supported yet
OPENAI_TOKEN=