# litellm / config.yaml
# Provenance: Hugging Face Space by jinnotgin, commit 78e7319 ("Update config.yaml")
# LiteLLM proxy configuration.
# Reference: https://docs.litellm.ai/docs/proxy/configuration

model_list:
  - model_name: gemini-pro
    litellm_params:
      # NOTE(review): no "vertex_ai/" prefix here, unlike gemini-pro-vision
      # below — confirm whether this model should also route through Vertex AI
      # (without the prefix it targets the Google AI Studio provider).
      model: gemini-pro
      rpm: 60  # requests per minute
  - model_name: gemini-pro-vision
    litellm_params:
      model: vertex_ai/gemini-pro-vision
      rpm: 30  # requests per minute
  - model_name: codechat-bison
    litellm_params:
      model: codechat-bison-32k
      rpm: 60  # requests per minute

# Module-level litellm settings —
# https://github.com/BerriAI/litellm/blob/main/litellm/__init__.py
# Merged into a SINGLE mapping: the original file declared `litellm_settings`
# twice (once for the Vertex project/location, once for drop_params). Most
# YAML parsers keep only the last duplicate key, which silently discarded the
# Vertex settings.
litellm_settings:
  vertex_project: "os.environ/VERTEX_PROJECT_ID"  # your GCP project ID
  vertex_location: "os.environ/VERTEX_LOCATION"   # region for Gemini models
  drop_params: true
  # set_verbose: true

general_settings:
  master_key: "os.environ/MASTER_KEY"  # [OPTIONAL] require all calls to contain this key (Authorization: Bearer sk-1234)
  database_url: "os.environ/DATABASE_URL"
  max_parallel_requests: 10  # max parallel requests for a user
  # (str) frequency of budget reset — seconds ("30s"), minutes ("30m"),
  # hours ("30h"), days ("30d"). Quoted: the consumer expects a string.
  budget_duration: "30d"