grouped-sampling-demo / on_server_start.py
"""
A script that is run when the server starts.
"""
from transformers import AutoModelForCausalLM, AutoTokenizer


def download_model(model_name: str) -> None:
    """
    Downloads the model and its tokenizer with the given name,
    caching them locally so later loads don't hit the network.

    :param model_name: The name of the model to download.
    """
    AutoModelForCausalLM.from_pretrained(model_name)
    AutoTokenizer.from_pretrained(model_name)


def download_useful_models() -> None:
    """
    Downloads the models that are useful for this project,
    so that users don't have to wait for them to download
    the first time they use the app.
    """
    useful_models = (
        "gpt2",
        "EleutherAI/gpt-j-6B",
        "sberbank-ai/mGPT",
        "facebook/opt-125m",
    )
    for model_name in useful_models:
        download_model(model_name)


def main() -> None:
    download_useful_models()


if __name__ == "__main__":
    main()
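
# A minimal sketch of an alternative (an assumption, not part of the original
# script): huggingface_hub.snapshot_download fetches and caches a repository's
# files without instantiating the model in memory, which avoids the RAM cost of
# loading large checkpoints such as EleutherAI/gpt-j-6B during server startup.
#
# from huggingface_hub import snapshot_download
#
#
# def prefetch_model_files(model_name: str) -> None:
#     """Download and cache a model's files without loading the weights."""
#     snapshot_download(repo_id=model_name)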