{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "5af7e53b-80ff-4058-888d-fe41804f64ba", "metadata": { "scrolled": true, "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Looking in indexes: https://pypi.org/simple, https://pip.repos.neuron.amazonaws.com\n", "Requirement already satisfied: pip in /home/ec2-user/anaconda3/envs/pytorch_p39/lib/python3.9/site-packages (23.1.2)\n" ] } ], "source": [ "!pip install --upgrade pip\n", "!pip install \"sagemaker==2.116.0\" \"huggingface_hub==0.10.1\" --upgrade --quiet" ] }, { "cell_type": "code", "execution_count": 5, "id": "93ee3d96-400f-46b4-8eb3-0f3f3c853a7e", "metadata": { "tags": [] }, "outputs": [], "source": [ "from distutils.dir_util import copy_tree\n", "from pathlib import Path\n", "from huggingface_hub import snapshot_download\n", "import random\n", "import os\n", "import tarfile\n", "import time\n", "import sagemaker\n", "from datetime import datetime\n", "from sagemaker.s3 import S3Uploader\n", "import boto3\n", "from sagemaker.huggingface.model import HuggingFaceModel\n", "from threading import Thread\n", "import subprocess\n", "import shutil" ] }, { "cell_type": "code", "execution_count": 2, "id": "2db37b03-b517-46bc-8602-4999a64399c0", "metadata": { "tags": [] }, "outputs": [], "source": [ "# ------------------------------------------------\n", "# Configuration\n", "# ------------------------------------------------\n", "STAGE = \"prod\"\n", "model_configs = [\n", " # {\n", " # \"inference_2\": False, \n", " # \"path\": \"icbinp\",\n", " # \"endpoint_name\": \"gamma-10000-2023-05-16-14-55\"\n", " # #\"endpoint_name\": f\"{STAGE}-10000-\" + datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n", " # },\n", " # {\n", " # \"inference_2\": False, \n", " # \"path\": \"icb_with_epi\",\n", " # \"endpoint_name\": \"gamma-10000-2023-05-16-14-55\"\n", " # # \"endpoint_name\": f\"{STAGE}-10000-\" + datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n", " # },\n", " {\n", " \"inference_2\": False, \n", " \"path\": \"model_v9\",\n", " # \"endpoint_name\": \"gamma-10000-2023-05-16-14-55\"\n", " \"endpoint_name\": f\"{STAGE}-10000-\" + datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n", " },\n", " {\n", " \"inference_2\": False, \n", " \"path\": \"model_v8\",\n", " #\"endpoint_name\": \"gamma-10001-2023-05-08-06-14\"\n", " \"endpoint_name\": f\"{STAGE}-10001-\" + datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n", " },\n", " # {\n", " # \"inference_2\": False, \n", " # \"path\": \"model_v5_anime\",\n", " # \"endpoint_name\": \"gamma-10001-2023-05-08-06-14\"\n", " # #\"endpoint_name\": f\"{STAGE}-10001-\" + datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n", " # },\n", " # {\n", " # \"inference_2\": False, \n", " # \"path\": \"model_v5.3_comic\",\n", " # #\"endpoint_name\": \"gamma-10002-2023-05-08-07-22\"\n", " # \"endpoint_name\": f\"{STAGE}-10002-\" + datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n", " # },\n", " {\n", " \"inference_2\": False, \n", " \"path\": \"model_v10\",\n", " # \"endpoint_name\": \"gamma-10002-2023-05-08-07-22\"\n", " \"endpoint_name\": f\"{STAGE}-10002-\" + datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n", " },\n", " {\n", " \"inference_2\": True, \n", " \"path\": \"model_v5.2_other\",\n", " # \"endpoint_name\": \"gamma-other-2023-05-04-09-33\"\n", " \"endpoint_name\": f\"{STAGE}-other-\" + datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n", " }\n", " # {\n", " # \"inference_2\": False, \n", " # \"path\": \"model_v6_bheem\",\n", " # \"endpoint_name\": f\"{STAGE}-10003-\" + 
datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n", " # },\n", " # {\n", " # \"inference_2\": False, \n", " # \"path\": \"model_v12\",\n", " # \"endpoint_name\": \"gamma-10003-2023-05-04-05-20\"\n", " # # \"endpoint_name\": f\"{STAGE}-10003-\" + datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n", " # }\n", "]\n", "\n", "VpcConfig = {\n", " \"Subnets\": [\n", " \"subnet-0df3f71df4c7b29e5\",\n", " \"subnet-0d753b7fc74b5ee68\"\n", " ],\n", " \"SecurityGroupIds\": [\n", " \"sg-033a7948e79a501cd\"\n", " ]\n", "}" ] }, { "cell_type": "code", "execution_count": 3, "id": "d7322ac4-aeeb-4a72-a662-5f3fa74e6454", "metadata": { "tags": [] }, "outputs": [], "source": [ "def compress(tar_dir=None,output_file=\"model.tar.gz\"):\n", " parent_dir=os.getcwd()\n", " os.chdir(parent_dir + \"/\" + tar_dir)\n", " with tarfile.open(os.path.join(parent_dir, output_file), \"w:gz\") as tar:\n", " for item in os.listdir('.'):\n", " print(\"- \" + item)\n", " tar.add(item, arcname=item)\n", " os.chdir(parent_dir)\n", "\n", " \n", "def create_model_tar(config):\n", " print(\"Copying inference 'code': \" + config.get(\"path\"))\n", " \n", " model_tar = Path(config.get(\"path\"))\n", " if os.path.exists(model_tar.joinpath(\"code\")):\n", " shutil.rmtree(model_tar.joinpath(\"code\"))\n", " out_tar = config.get(\"path\") + \".tar.gz\"\n", " model_tar.mkdir(exist_ok=True)\n", " copy_tree(\"code/\", str(model_tar.joinpath(\"code\")))\n", " copy_tree(\"laur_style/\", str(model_tar.joinpath(\"laur_style\")))\n", " \n", " if config.get(\"inference_2\"):\n", " os.remove(model_tar.joinpath(\"code\").joinpath(\"inference.py\"))\n", " os.rename(model_tar.joinpath(\"code\").joinpath(\"inference2.py\"), model_tar.joinpath(\"code\").joinpath(\"inference.py\"))\n", " \n", " print(\"Compressing: \" + config.get(\"path\"))\n", "\n", " if os.path.exists(out_tar):\n", " os.remove(out_tar)\n", "\n", " compress(str(model_tar), out_tar)\n", " \n", "def upload_to_s3(config):\n", " out_tar = config.get(\"path\") + \".tar.gz\"\n", " print(\"Uploading model to S3: \" + out_tar)\n", " s3_model_uri=S3Uploader.upload(local_path=out_tar, desired_s3_uri=f\"s3://comic-assets/stable-diffusion-v1-4/v2/\")\n", " return s3_model_uri\n", " \n", " \n", "def deploy_and_create_endpoint(config, s3_model_uri):\n", " sess = sagemaker.Session()\n", " # sagemaker session bucket -> used for uploading data, models and logs\n", " # sagemaker will automatically create this bucket if it not exists\n", " sagemaker_session_bucket=None\n", " if sagemaker_session_bucket is None and sess is not None:\n", " # set to default bucket if a bucket name is not given\n", " sagemaker_session_bucket = sess.default_bucket()\n", " try:\n", " role = sagemaker.get_execution_role()\n", " except ValueError:\n", " iam = boto3.client('iam')\n", " role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']\n", "\n", " sess = sagemaker.Session(default_bucket=sagemaker_session_bucket)\n", " \n", " huggingface_model = HuggingFaceModel(\n", " model_data=s3_model_uri, # path to your model and script\n", " role=role, # iam role with permissions to create an Endpoint\n", " transformers_version=\"4.17\", # transformers version used\n", " pytorch_version=\"1.10\", # pytorch version used\n", " py_version='py38',# python version used\n", " vpc_config=VpcConfig,\n", " )\n", "\n", " print(\"Creating endpoint: \" + config.get(\"endpoint_name\"))\n", "\n", " predictor = huggingface_model.deploy(\n", " initial_instance_count=1,\n", " instance_type=\"ml.g4dn.xlarge\",\n", " 
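# endpoint_name comes from model_configs above (e.g. prod-10000-<timestamp>),\n", " # so each model is exposed on its own stage-prefixed endpoint.\n", " 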
endpoint_name=config.get(\"endpoint_name\")\n", " )\n", "\n", " \n", "def start_process(config):\n", " try:\n", " create_model_tar(config)\n", " s3_model_uri = upload_to_s3(config)\n", " #s3_model_uri = \"s3://comic-assets/stable-diffusion-v1-4/v2//model_v5.2_other.tar.gz\"\n", " deploy_and_create_endpoint(config, s3_model_uri)\n", " except Exception as e:\n", " print(\"Failed to deploy: \" + config.get(\"path\") + \"\\n\" + str(e))" ] }, { "cell_type": "code", "execution_count": 4, "id": "cdc04669-90a5-4b43-8499-ad1d2dd63a4c", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Copying inference 'code': model_v9\n", "Compressing: model_v9\n", "- scheduler\n", "- vae\n", "- .ipynb_checkpoints\n", "- feature_extractor\n", "- tokenizer\n", "- text_encoder\n", "- model_index.json\n", "- laur_style\n", "- code\n", "- unet\n", "- args.json\n", "Uploading model to S3: model_v9.tar.gz\n", "Creating endpoint: gamma-10000-2023-05-16-14-55\n", "-----------------!\n", "\n", "Completed in : 992.3517553806305s\n" ] } ], "source": [ "threads = []\n", "\n", "os.chdir(\"/home/ec2-user/SageMaker\")\n", "\n", "start_time = time.time()\n", "\n", "for config in model_configs:\n", " thread = Thread(target=start_process, args=(config,))\n", " thread.start()\n", " threads.append(thread)\n", "\n", "for thread in threads:\n", " thread.join()\n", " \n", "print(\"\\n\\nCompleted in : \" + str(time.time() - start_time) + \"s\")\n", "\n", "# For redeploying gamma endpoints or promoting gamma endpoints to prod\n", "\n", "# thread1 = Thread(target=deploy_and_create_endpoint, args=(model_configs[0],\"s3://comic-assets/stable-diffusion-v1-4/v2//model_v9.tar.gz\",))\n", "# thread2 = Thread(target=deploy_and_create_endpoint, args=(model_configs[1],\"s3://comic-assets/stable-diffusion-v1-4/v2//anime_mode_with_lora.tar.gz\",))\n", "# thread3 = Thread(target=deploy_and_create_endpoint, args=(model_configs[0],\"s3://comic-assets/stable-diffusion-v1-4/v2//model_v5.3_comic.tar.gz\",))\n", "# thread4 = Thread(target=deploy_and_create_endpoint, args=(model_configs[3],\"s3://comic-assets/stable-diffusion-v1-4/v2//model_v5.2_other.tar.gz\",))\n", "\n", "# thread1.start()\n", "# thread2.start()\n", "# thread3.start()\n", "# thread4.start()\n", "\n", "# thread1.join()\n", "# thread2.join()\n", "# thread3.join()\n", "# thread4.join()\n", "\n", "# print(\"Done\")\n" ] },
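{ "cell_type": "code", "execution_count": null, "id": "endpoint-smoke-test-sketch", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Optional smoke test (sketch) for one of the endpoints created above, using the\n", "# SageMaker runtime API via boto3. The request/response schema is defined by\n", "# code/inference.py (not shown in this notebook), so the {\"inputs\": ...} payload\n", "# below is only an assumed example -- adjust it to whatever inference.py expects\n", "# before uncommenting.\n", "\n", "# import json\n", "# runtime = boto3.client(\"sagemaker-runtime\")\n", "# response = runtime.invoke_endpoint(\n", "#     EndpointName=model_configs[0].get(\"endpoint_name\"),\n", "#     ContentType=\"application/json\",\n", "#     Body=json.dumps({\"inputs\": \"a test prompt\"}),\n", "# )\n", "# print(json.loads(response[\"Body\"].read()))\n" ] }, { "cell_type": "code", "execution_count": null, "id": "39f007f2-0ff8-487c-b5d7-158f0947b7fd", "metadata": { "collapsed": true, "jupyter": { "outputs_hidden": true }, "tags": [] }, "outputs": [], "source": [ "\n", "# import sagemaker\n", "# import boto3\n", "# import time \n", "\n", "# start = time.time()\n", "\n", "# sess = sagemaker.Session()\n", "# # sagemaker session bucket -> used for uploading data, models and logs\n", "# # sagemaker will automatically create this bucket if it not exists\n", "# sagemaker_session_bucket=None\n", "# if sagemaker_session_bucket is None and sess is not None:\n", "# # set to default bucket if a bucket name is not given\n", "# sagemaker_session_bucket = sess.default_bucket()\n", "\n", "# try:\n", "# role = sagemaker.get_execution_role()\n", "# except ValueError:\n", "# iam = boto3.client('iam')\n", "# role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']\n", "\n", "# sess = sagemaker.Session(default_bucket=sagemaker_session_bucket)\n", "\n", "# print(f\"sagemaker role arn: {role}\")\n", "# print(f\"sagemaker bucket: {sess.default_bucket()}\")\n", "# 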
print(f\"sagemaker session region: {sess.boto_region_name}\")\n", "# print(sagemaker.get_execution_role())\n", "\n", "# from sagemaker.s3 import S3Uploader\n", "\n", "# print(\"Uploading model to S3\")\n", "\n", "# # upload model.tar.gz to s3\n", "# s3_model_uri=S3Uploader.upload(local_path=\"model.tar.gz\", desired_s3_uri=f\"s3://comic-assets/stable-diffusion-v1-4/v2/\")\n", "\n", "# print(f\"model uploaded to: {s3_model_uri}\")\n", "\n", "\n", "# from sagemaker.huggingface.model import HuggingFaceModel\n", "\n", "# VpcConfig = {\n", "# \"Subnets\": [\n", "# \"subnet-0df3f71df4c7b29e5\",\n", "# \"subnet-0d753b7fc74b5ee68\"\n", "# ],\n", "# \"SecurityGroupIds\": [\n", "# \"sg-033a7948e79a501cd\"\n", "# ]\n", "# }\n", "\n", "# # create Hugging Face Model Class\n", "# huggingface_model = HuggingFaceModel(\n", "# model_data=s3_model_uri, # path to your model and script\n", "# role=role, # iam role with permissions to create an Endpoint\n", "# transformers_version=\"4.17\", # transformers version used\n", "# pytorch_version=\"1.10\", # pytorch version used\n", "# py_version='py38',# python version used\n", "# vpc_config=VpcConfig,\n", "# )\n", "\n", "# print(\"Deploying model\")\n", "\n", "# predictor = huggingface_model.deploy(\n", "# initial_instance_count=1,\n", "# instance_type=\"ml.g4dn.xlarge\",\n", "# # endpoint_name=endpoint_name\n", "# )\n", "\n", "# print(f\"Done {time.time() - start}\")" ] }, { "cell_type": "code", "execution_count": null, "id": "aa95a262-d6ba-4e61-8657-6f8e5bab74a1", "metadata": { "tags": [] }, "outputs": [], "source": [ "!curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.rpm.sh | sudo bash" ] }, { "cell_type": "code", "execution_count": null, "id": "524ca546-2a67-4b51-9cda-a1b51a49c339", "metadata": { "tags": [] }, "outputs": [], "source": [ "!sudo yum install git-lfs" ] }, { "cell_type": "code", "execution_count": null, "id": "3c7e661f-5eee-4357-80f6-e7563941a812", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "availableInstances": [ { "_defaultOrder": 0, "_isFastLaunch": true, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 4, "name": "ml.t3.medium", "vcpuNum": 2 }, { "_defaultOrder": 1, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.t3.large", "vcpuNum": 2 }, { "_defaultOrder": 2, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.t3.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 3, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.t3.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 4, "_isFastLaunch": true, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.m5.large", "vcpuNum": 2 }, { "_defaultOrder": 5, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.m5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 6, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.m5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 7, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.m5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 8, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, 
"memoryGiB": 128, "name": "ml.m5.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 9, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.m5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 10, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.m5.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 11, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.m5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 12, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.m5d.large", "vcpuNum": 2 }, { "_defaultOrder": 13, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.m5d.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 14, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.m5d.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 15, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.m5d.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 16, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.m5d.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 17, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.m5d.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 18, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.m5d.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 19, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.m5d.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 20, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": true, "memoryGiB": 0, "name": "ml.geospatial.interactive", "supportedImageNames": [ "sagemaker-geospatial-v1-0" ], "vcpuNum": 0 }, { "_defaultOrder": 21, "_isFastLaunch": true, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 4, "name": "ml.c5.large", "vcpuNum": 2 }, { "_defaultOrder": 22, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.c5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 23, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.c5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 24, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.c5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 25, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 72, "name": "ml.c5.9xlarge", "vcpuNum": 36 }, { "_defaultOrder": 26, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 96, "name": "ml.c5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 27, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 144, "name": "ml.c5.18xlarge", "vcpuNum": 72 }, { "_defaultOrder": 28, 
"_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.c5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 29, "_isFastLaunch": true, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.g4dn.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 30, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.g4dn.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 31, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.g4dn.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 32, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.g4dn.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 33, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.g4dn.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 34, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.g4dn.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 35, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 61, "name": "ml.p3.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 36, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 244, "name": "ml.p3.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 37, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 8, "hideHardwareSpecs": false, "memoryGiB": 488, "name": "ml.p3.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 38, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 8, "hideHardwareSpecs": false, "memoryGiB": 768, "name": "ml.p3dn.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 39, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.r5.large", "vcpuNum": 2 }, { "_defaultOrder": 40, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.r5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 41, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.r5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 42, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.r5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 43, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.r5.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 44, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.r5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 45, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 512, "name": "ml.r5.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 46, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 768, "name": "ml.r5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 47, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, 
"hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.g5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 48, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.g5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 49, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.g5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 50, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.g5.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 51, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.g5.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 52, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.g5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 53, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.g5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 54, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 8, "hideHardwareSpecs": false, "memoryGiB": 768, "name": "ml.g5.48xlarge", "vcpuNum": 192 } ], "instance_type": "ml.t3.medium", "kernelspec": { "display_name": "conda_pytorch_p39", "language": "python", "name": "conda_pytorch_p39" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.15" } }, "nbformat": 4, "nbformat_minor": 5 }