bad-tomich1 committed on
Commit
98ba684
1 Parent(s): babfa2c

Upload launch.py

Browse files
Files changed (1) hide show
  1. launch.py +139 -0
launch.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
3
+
4
+ import sys
5
+ import platform
6
+ import fooocus_version
7
+ import argparse
8
+
9
+ from modules.launch_util import is_installed, run, python, \
10
+ run_pip, repo_dir, git_clone, requirements_met, script_path, dir_repos
11
+ from modules.model_loader import load_file_from_url
12
+ from modules.path import modelfile_path, lorafile_path, clip_vision_path, controlnet_path, vae_approx_path, fooocus_expansion_path, upscale_models_path
13
+
14
+
15
+ REINSTALL_ALL = False
16
+ DEFAULT_ARGS = ['--disable-smart-memory', '--disable-cuda-malloc']
17
+
18
+ def prepare_environment():
19
+ torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118")
20
+ torch_command = os.environ.get('TORCH_COMMAND',
21
+ f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}")
22
+ requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
23
+
24
+ xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.21')
25
+
26
+ comfy_repo = os.environ.get('COMFY_REPO', "https://github.com/comfyanonymous/ComfyUI")
27
+ comfy_commit_hash = os.environ.get('COMFY_COMMIT_HASH', "2381d36e6db8e8150e42ff2ede628db5b00ae26f")
28
+
29
+ print(f"Python {sys.version}")
30
+ print(f"Fooocus version: {fooocus_version.version}")
31
+
32
+ comfyui_name = 'ComfyUI-from-StabilityAI-Official'
33
+ git_clone(comfy_repo, repo_dir(comfyui_name), "Inference Engine", comfy_commit_hash)
34
+ sys.path.append(os.path.join(script_path, dir_repos, comfyui_name))
35
+
36
+ if REINSTALL_ALL or not is_installed("torch") or not is_installed("torchvision"):
37
+ run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
38
+
39
+ if REINSTALL_ALL or not is_installed("xformers"):
40
+ if platform.system() == "Windows":
41
+ if platform.python_version().startswith("3.10"):
42
+ run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True)
43
+ else:
44
+ print("Installation of xformers is not supported in this version of Python.")
45
+ print(
46
+ "You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
47
+ if not is_installed("xformers"):
48
+ exit(0)
49
+ elif platform.system() == "Linux":
50
+ run_pip(f"install -U -I --no-deps {xformers_package}", "xformers")
51
+
52
+ if REINSTALL_ALL or not requirements_met(requirements_file):
53
+ run_pip(f"install -r \"{requirements_file}\"", "requirements")
54
+
55
+ return
56
+
57
+
58
+ model_filenames = []
59
+
60
+ lora_filenames = []
61
+
62
+ clip_vision_filenames = [
63
+ ('clip_vision_g.safetensors',
64
+ 'https://huggingface.co/stabilityai/control-lora/resolve/main/revision/clip_vision_g.safetensors')
65
+ ]
66
+
67
+ controlnet_filenames = [
68
+ ('control-lora-canny-rank128.safetensors',
69
+ 'https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank128/control-lora-canny-rank128.safetensors'),
70
+ ('control-lora-canny-rank256.safetensors',
71
+ 'https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-canny-rank256.safetensors'),
72
+ ('control-lora-depth-rank128.safetensors',
73
+ 'https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank128/control-lora-depth-rank128.safetensors'),
74
+ ('control-lora-depth-rank256.safetensors',
75
+ 'https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-depth-rank256.safetensors')
76
+ ]
77
+
78
+ vae_approx_filenames = [
79
+ ('xlvaeapp.pth',
80
+ 'https://huggingface.co/lllyasviel/misc/resolve/main/xlvaeapp.pth'),
81
+ ('taesd_decoder.pth',
82
+ 'https://github.com/madebyollin/taesd/raw/main/taesd_decoder.pth')
83
+ ]
84
+
85
+
86
+ upscaler_filenames = [
87
+ ('fooocus_upscaler_s409985e5.bin',
88
+ 'https://huggingface.co/lllyasviel/misc/resolve/main/fooocus_upscaler_s409985e5.bin')
89
+ ]
90
+
91
+
92
+ def download_models():
93
+ for file_name, url in model_filenames:
94
+ load_file_from_url(url=url, model_dir=modelfile_path, file_name=file_name)
95
+ for file_name, url in lora_filenames:
96
+ load_file_from_url(url=url, model_dir=lorafile_path, file_name=file_name)
97
+ for file_name, url in clip_vision_filenames:
98
+ load_file_from_url(url=url, model_dir=clip_vision_path, file_name=file_name)
99
+ for file_name, url in controlnet_filenames:
100
+ load_file_from_url(url=url, model_dir=controlnet_path, file_name=file_name)
101
+ for file_name, url in vae_approx_filenames:
102
+ load_file_from_url(url=url, model_dir=vae_approx_path, file_name=file_name)
103
+ for file_name, url in upscaler_filenames:
104
+ load_file_from_url(url=url, model_dir=upscale_models_path, file_name=file_name)
105
+
106
+ load_file_from_url(
107
+ url='https://huggingface.co/lllyasviel/misc/resolve/main/fooocus_expansion.bin',
108
+ model_dir=fooocus_expansion_path,
109
+ file_name='pytorch_model.bin'
110
+ )
111
+
112
+ return
113
+
114
+
115
+ def parse_args():
116
+ argv = sys.argv + DEFAULT_ARGS
117
+ sys.argv = [sys.argv[0]]
118
+ import comfy.cli_args
119
+ sys.argv = argv
120
+
121
+ parser = argparse.ArgumentParser('launch.py', parents=[comfy.cli_args.parser], conflict_handler='resolve')
122
+ parser.add_argument("--port", type=int, default=None, help="Set the listen port.")
123
+ parser.add_argument("--share", action='store_true', help="Set whether to share on Gradio.")
124
+ parser.add_argument("--listen", type=str, default=None, metavar="IP", nargs="?", const="0.0.0.0", help="Set the listen interface.")
125
+
126
+ comfy.cli_args.args = parser.parse_args()
127
+
128
+
129
+ def cuda_malloc():
130
+ import cuda_malloc
131
+
132
+
133
+ prepare_environment()
134
+
135
+ parse_args()
136
+
137
+ download_models()
138
+
139
+ from webui import *