from PIL import Image
import gradio as gr
from huggingface_hub import hf_hub_download
from model import Model
from app_edge import create_demo as create_demo_edge
from app_depth import create_demo as create_demo_depth
import os
import torch
import subprocess


# def install_requirements():
#     try:
#         # subprocess.run(['pip', 'install', 'torch==2.1.2+cu118', '--extra-index-url', 'https://download.pytorch.org/whl/cu118'], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#         subprocess.run(['pip', 'show', 'torch'], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#         # result = subprocess.run(['pip', 'install', '-r', 'requirements.txt'], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#         print("Installation succeeded!")
#         # print("Output:", result.stdout.decode('utf-8'))
#     except subprocess.CalledProcessError as e:
#         print("Installation failed!")
#         print("Error:", e.stderr.decode('utf-8'))
# install_requirements()

print("Torch version:", torch.__version__)

# hf_hub_download(repo_id='wondervictor/ControlAR',
#                 filename='canny_MR.safetensors',
#                 local_dir='./checkpoints/')
# hf_hub_download(repo_id='wondervictor/ControlAR',
#                 filename='depth_MR.safetensors',
#                 local_dir='./checkpoints/')
# # hf_hub_download('google/flan-t5-xl', cache_dir='./checkpoints/')

# Fetch the checkpoints needed by the demo: the flan-t5-xl text encoder, the
# MiDaS (DPT-hybrid) depth estimator, the ControlAR edge/depth weights, and
# the DINOv2-base image encoder.
ckpt_folder = './checkpoints'
t5_folder = os.path.join(ckpt_folder, "flan-t5-xl/flan-t5-xl")
# dinov2_folder = os.path.join(ckpt_folder, "dinov2-small")
dinov2_folder = os.path.join(ckpt_folder, "dinov2-base")
hf_hub_download(repo_id="google/flan-t5-xl", filename="config.json", local_dir=t5_folder)
hf_hub_download(repo_id="google/flan-t5-xl", filename="pytorch_model-00001-of-00002.bin", local_dir=t5_folder)
hf_hub_download(repo_id="google/flan-t5-xl", filename="pytorch_model-00002-of-00002.bin", local_dir=t5_folder)
hf_hub_download(repo_id="google/flan-t5-xl", filename="pytorch_model.bin.index.json", local_dir=t5_folder)
hf_hub_download(repo_id="google/flan-t5-xl", filename="special_tokens_map.json", local_dir=t5_folder)
hf_hub_download(repo_id="google/flan-t5-xl", filename="spiece.model", local_dir=t5_folder)
hf_hub_download(repo_id="google/flan-t5-xl", filename="tokenizer_config.json", local_dir=t5_folder)
hf_hub_download(repo_id="lllyasviel/Annotators", filename="dpt_hybrid-midas-501f0c75.pt", local_dir=ckpt_folder)
hf_hub_download(repo_id="wondervictor/ControlAR", filename="edge_base.safetensors", local_dir=ckpt_folder)
hf_hub_download(repo_id="wondervictor/ControlAR", filename="depth_base.safetensors", local_dir=ckpt_folder)
hf_hub_download(repo_id="facebook/dinov2-base", filename="config.json", local_dir=dinov2_folder)
hf_hub_download(repo_id="facebook/dinov2-base", filename="preprocessor_config.json", local_dir=dinov2_folder)
hf_hub_download(repo_id="facebook/dinov2-base", filename="pytorch_model.bin", local_dir=dinov2_folder)
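
# Optional consolidation (a sketch, not part of the upstream ControlAR app):
# the per-file flan-t5-xl downloads above could instead be expressed as a
# single snapshot_download call with allow_patterns. Defined here for
# reference only; it is not invoked by this script.
from huggingface_hub import snapshot_download


def download_t5_snapshot(local_dir: str = t5_folder) -> str:
    """Download the flan-t5-xl config, weights, and tokenizer files in one call."""
    return snapshot_download(
        repo_id="google/flan-t5-xl",
        allow_patterns=[
            "config.json",
            "pytorch_model-*.bin",
            "pytorch_model.bin.index.json",
            "special_tokens_map.json",
            "spiece.model",
            "tokenizer_config.json",
        ],
        local_dir=local_dir,
    )
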
DESCRIPTION = "# [ControlAR: Controllable Image Generation with Autoregressive Models](https://arxiv.org/abs/2410.02705) \n ### The first image in the outputs is the condition; the others are images generated by ControlAR. \n ### You can run it locally by following the instructions in our [GitHub repo](https://github.com/hustvl/ControlAR)."
SHOW_DUPLICATE_BUTTON = os.getenv("SHOW_DUPLICATE_BUTTON") == "1"

model = Model()
# device = "cuda"
# model.to(device)

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",
        visible=SHOW_DUPLICATE_BUTTON,
    )
    with gr.Tabs():
        with gr.TabItem("Depth to Image"):
            create_demo_depth(model.process_depth)
        with gr.TabItem("Edge to Image"):
            create_demo_edge(model.process_edge)

if __name__ == "__main__":
    demo.launch(share=False)
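
# Usage note (not part of the original script): `python app.py` serves the demo
# on Gradio's default local address. launch() also accepts server_name,
# server_port, and share, so a publicly reachable variant could look like
#     demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
# if those settings suit your deployment.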