{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": { "provenance": [], "gpuType": "T4" },
    "kernelspec": { "name": "python3", "display_name": "Python 3" },
    "language_info": { "name": "python" },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "code",
      "source": [
        "#@title Setup: fetch the Moore-AnimateAnyone repo and install requirements\n",
        "!rm -rf /content/sample_data\n",
        "!wget https://huggingface.co/waveydaveygravy/Moore-AnimateAnyone/resolve/main/Moore-AnimateAnyone.zip\n",
        "!unzip Moore-AnimateAnyone.zip\n",
        "%cd /content/Moore-AnimateAnyone\n",
        "!pip install -r requirements.txt\n"
      ],
      "metadata": { "id": "SGWqEghrOl9j" },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "#@title Download Stable Diffusion v1.5 components with aria2\n",
        "!apt -y install -qq aria2\n",
        "\n",
        "BaseModelUrl = \"https://huggingface.co/runwayml/stable-diffusion-v1-5\"\n",
        "BaseModelDir = \"/content/Moore-AnimateAnyone/pretrainedweights/stable-diffusion-v1-5\"\n",
        "\n",
        "# Create the target directory and necessary subdirectories\n",
        "!mkdir -p {BaseModelDir} {BaseModelDir}/vae {BaseModelDir}/unet {BaseModelDir}/tokenizer {BaseModelDir}/text_encoder {BaseModelDir}/scheduler {BaseModelDir}/safety_checker {BaseModelDir}/feature_extractor\n",
        "\n",
        "# Download each model component into its proper subdirectory.\n",
        "# NOTE: a bare `aria2c {BaseModelUrl}` would only fetch the repo's HTML landing\n",
        "# page, not any model file, so that line has been removed.\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/model_index.json -d {BaseModelDir} -o model_index.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/vae/diffusion_pytorch_model.bin -d {BaseModelDir}/vae -o diffusion_pytorch_model.bin\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/vae/config.json -d {BaseModelDir}/vae -o config.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/unet/diffusion_pytorch_model.bin -d {BaseModelDir}/unet -o diffusion_pytorch_model.bin\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/unet/config.json -d {BaseModelDir}/unet -o config.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/vocab.json -d {BaseModelDir}/tokenizer -o vocab.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/tokenizer_config.json -d {BaseModelDir}/tokenizer -o tokenizer_config.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/special_tokens_map.json -d {BaseModelDir}/tokenizer -o special_tokens_map.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/merges.txt -d {BaseModelDir}/tokenizer -o merges.txt\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/text_encoder/pytorch_model.bin -d {BaseModelDir}/text_encoder -o pytorch_model.bin\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/text_encoder/config.json -d {BaseModelDir}/text_encoder -o config.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/scheduler/scheduler_config.json -d {BaseModelDir}/scheduler -o scheduler_config.json\n",
        "# Fixed: the safety_checker weights previously went to a non-existent `safety`\n",
        "# dir with no output filename; they belong in `safety_checker`, and the\n",
        "# component's config plus the feature_extractor config are also required.\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/safety_checker/pytorch_model.bin -d {BaseModelDir}/safety_checker -o pytorch_model.bin\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/safety_checker/config.json -d {BaseModelDir}/safety_checker -o config.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/feature_extractor/preprocessor_config.json -d {BaseModelDir}/feature_extractor -o preprocessor_config.json\n"
      ],
      "metadata": { "id": "f86SbtfCUgF3" },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "#@title Download AnimateAnyone weights, image encoder and DWPose models\n",
        "%cd /content/Moore-AnimateAnyone/pretrainedweights\n",
        "!wget https://huggingface.co/patrolli/AnimateAnyone/resolve/main/denoising_unet.pth\n",
        "!wget https://huggingface.co/patrolli/AnimateAnyone/resolve/main/motion_module.pth\n",
        "!wget https://huggingface.co/patrolli/AnimateAnyone/resolve/main/pose_guider.pth\n",
        "!wget https://huggingface.co/patrolli/AnimateAnyone/resolve/main/reference_unet.pth\n",
        "\n",
        "# Ensure the target directory exists before cd-ing into it (a failed %cd would\n",
        "# leave the wgets downloading into the wrong working directory).\n",
        "!mkdir -p /content/Moore-AnimateAnyone/pretrainedweights/image_encoder\n",
        "%cd /content/Moore-AnimateAnyone/pretrainedweights/image_encoder\n",
        "!wget https://huggingface.co/lambdalabs/sd-image-variations-diffusers/resolve/main/image_encoder/pytorch_model.bin\n",
        "!wget https://huggingface.co/lambdalabs/sd-image-variations-diffusers/resolve/main/image_encoder/config.json\n",
        "\n",
        "!mkdir -p /content/Moore-AnimateAnyone/pretrainedweights/DWpose\n",
        "%cd /content/Moore-AnimateAnyone/pretrainedweights/DWpose\n",
        "!wget https://huggingface.co/yzd-v/DWPose/resolve/main/yolox_l.onnx #yolox\n",
        "!wget https://huggingface.co/yzd-v/DWPose/resolve/main/dw-ll_ucoco_384.onnx #dwpose"
      ],
      "metadata": { "id": "-ESNzmpIWHyf" },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "#@title to extract pose (not tested)\n",
        "#%cd /content/Moore-AnimateAnyone\n",
        "#!python /content/Moore-AnimateAnyone/vid2pose.py --video_path /content/Moore-AnimateAnyone/animation4.mp4"
      ],
      "metadata": { "id": "BobRXjG7tG4b" },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "#@title YOU NEED TO HAVE A POSE VIDEO FIRST, CAN USE THE EXAMPLES. GRADIO WILL CRASH DURING GENERATION BUT SAVE TO OUTPUTS--\n",
        "%cd /content/Moore-AnimateAnyone\n",
        "!python /content/Moore-AnimateAnyone/app.py"
      ],
      "metadata": { "id": "DJxzTyuKbIX1" },
      "execution_count": null,
      "outputs": []
    }
  ]
}