Update main.py

main.py CHANGED

@@ -1,3 +1,16 @@
+# GPU patch: when no NVIDIA driver is present, route torch.cuda-related calls to the CPU device
+import torch
+if not torch.cuda.is_available():
+    # Patch torch.cuda.current_device() to return 0, and make
+    # torch.device(<int>) return the "cpu" device instead.
+    torch.cuda.current_device = lambda: 0
+    original_torch_device = torch.device
+    def patched_torch_device(arg):
+        if isinstance(arg, int):
+            return original_torch_device("cpu")
+        return original_torch_device(arg)
+    torch.device = patched_torch_device
+
 import comfy.options
 comfy.options.enable_args_parsing()
 
@@ -8,10 +21,8 @@ import time
 from comfy.cli_args import args
 from app.logger import setup_logger
 
-
 setup_logger(log_level=args.verbose)
 
-
 def execute_prestartup_script():
     def execute_script(script_path):
         module_name = os.path.splitext(script_path)[0]
@@ -28,28 +39,41 @@ def execute_prestartup_script():
             return
 
     node_paths = folder_paths.get_folder_paths("custom_nodes")
+    node_prestartup_times = []  # accumulate every node's execution time
     for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)
-        node_prestartup_times = []
-
         for possible_module in possible_modules:
             module_path = os.path.join(custom_node_path, possible_module)
-            if os.path.isfile(module_path) or module_path.endswith(".disabled") or module_path == "__pycache__":
+            if os.path.isfile(module_path) or module_path.endswith(".disabled") or possible_module == "__pycache__":
                 continue
 
             script_path = os.path.join(module_path, "prestartup_script.py")
             if os.path.exists(script_path):
                 time_before = time.perf_counter()
                 success = execute_script(script_path)
-                node_prestartup_times.append((time.perf_counter() - time_before, module_path, success))
-
-    if len(node_prestartup_times) > 0:
-        print("\nPrestartup times for custom nodes:")
-        for n in sorted(node_prestartup_times):
-            if n[2]:
-                import_message = ""
-            else:
-                import_message = " (PRESTARTUP FAILED)"
-            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
+                elapsed = time.perf_counter() - time_before
+                node_prestartup_times.append((elapsed, module_path, success))
+
+    if node_prestartup_times:
+        # If the Rich library is available, print the results as a table.
+        try:
+            from rich.console import Console
+            from rich.table import Table
+            console = Console()
+            table = Table(title="Prestartup Times for Custom Nodes")
+            table.add_column("Time (s)", justify="right")
+            table.add_column("Status")
+            table.add_column("Custom Node Path")
+            for elapsed, module_path, success in sorted(node_prestartup_times, key=lambda x: x[0]):
+                status = "[green]Success[/green]" if success else "[red]Failed[/red]"
+                table.add_row(f"{elapsed:.1f}", status, module_path)
+            console.print(table)
+        except ImportError:
+            # Use the existing print output when Rich is not installed.
+            print("\nPrestartup times for custom nodes:")
+            for elapsed, module_path, success in sorted(node_prestartup_times, key=lambda x: x[0]):
+                import_message = "" if success else " (PRESTARTUP FAILED)"
+                print("{:6.1f} seconds{}:".format(elapsed, import_message), module_path)
     print()
 
 execute_prestartup_script()
@@ -83,7 +107,7 @@ if __name__ == "__main__":
     if args.windows_standalone_build:
         try:
             import fix_torch
-        except:
+        except Exception:
             pass
 
 import comfy.utils
@@ -94,16 +118,20 @@ from server import BinaryEventTypes
 import nodes
 import comfy.model_management
 
+# Log a warning when no NVIDIA GPU driver is present.
+if not torch.cuda.is_available():
+    logging.warning("No NVIDIA GPU driver found. Running in CPU mode. Performance may be degraded.")
+
 def cuda_malloc_warning():
     device = comfy.model_management.get_torch_device()
     device_name = comfy.model_management.get_torch_device_name(device)
-    cuda_malloc_warning = False
     if "cudaMallocAsync" in device_name:
         for b in cuda_malloc.blacklist:
             if b in device_name:
-                cuda_malloc_warning = True
-        if cuda_malloc_warning:
-            logging.warning("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n")
+    warning_needed = False
+                warning_needed = True
+    if warning_needed:
+        logging.warning("\nWARNING: this card most likely does not support cuda-malloc, if you get 'CUDA error' please run ComfyUI with: --disable-cuda-malloc\n")
 
 def prompt_worker(q, server):
     e = execution.PromptExecutor(server, lru_size=args.cache_lru)
@@ -114,7 +142,7 @@ def prompt_worker(q, server):
     while True:
         timeout = 1000.0
         if need_gc:
-            timeout = max(gc_collect_interval - (current_time - last_gc_collect), 0.0)
+            timeout = max(gc_collect_interval - (time.perf_counter() - last_gc_collect), 0.0)
 
         queue_item = q.get(timeout=timeout)
         if queue_item is not None:
@@ -166,7 +194,6 @@ async def run(server, address='', port=8188, verbose=True, call_on_start=None):
         addresses.append((addr, port))
     await asyncio.gather(server.start_multi_address(addresses, call_on_start), server.publish_loop())
 
-
 def hijack_progress(server):
     def hook(value, total, preview_image):
         comfy.model_management.throw_exception_if_processing_interrupted()
@@ -177,13 +204,11 @@ def hijack_progress(server):
             server.send_sync(BinaryEventTypes.UNENCODED_PREVIEW_IMAGE, preview_image, server.client_id)
     comfy.utils.set_progress_bar_global_hook(hook)
 
-
 def cleanup_temp():
     temp_dir = folder_paths.get_temp_directory()
     if os.path.exists(temp_dir):
         shutil.rmtree(temp_dir, ignore_errors=True)
 
-
 if __name__ == "__main__":
     if args.temp_directory:
         temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp")
@@ -195,7 +220,7 @@ if __name__ == "__main__":
         try:
             import new_updater
             new_updater.update_windows_updater()
-        except:
+        except Exception:
             pass
 
     loop = asyncio.new_event_loop()
@@ -225,7 +250,7 @@ if __name__ == "__main__":
         logging.info(f"Setting output directory to: {output_dir}")
         folder_paths.set_output_directory(output_dir)
 
-    #These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc.. nodes
+    # Default model save folder paths
     folder_paths.add_model_folder_path("checkpoints", os.path.join(folder_paths.get_output_directory(), "checkpoints"))
     folder_paths.add_model_folder_path("clip", os.path.join(folder_paths.get_output_directory(), "clip"))
     folder_paths.add_model_folder_path("vae", os.path.join(folder_paths.get_output_directory(), "vae"))
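Review note: a standalone way to sanity-check the CPU fallback patch at the top of main.py. This is a minimal sketch of the same monkey-patch, not code from the commit itself; the underscore-prefixed names are illustrative.

import torch

if not torch.cuda.is_available():
    torch.cuda.current_device = lambda: 0  # report device index 0 instead of raising
    _original_torch_device = torch.device  # keep the real constructor around
    def _patched_torch_device(arg):
        # torch.device(0) normally means "cuda:0"; map bare ints to the CPU instead
        if isinstance(arg, int):
            return _original_torch_device("cpu")
        return _original_torch_device(arg)
    torch.device = _patched_torch_device

print(torch.device(0))      # device(type='cpu') on a CUDA-less machine
print(torch.device("cpu"))  # string arguments pass through unchanged

One caveat worth flagging: after the patch, torch.device is a plain function rather than a type, so any isinstance(x, torch.device) check elsewhere in the process will raise a TypeError.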
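The prestartup timing report now prefers a Rich table and falls back to plain print, keeping rich an optional dependency. A self-contained sketch of the same pattern, using a made-up sample row:

timings = [(0.3, "custom_nodes/example_node", True)]  # (seconds, path, success) - sample data

try:
    from rich.console import Console
    from rich.table import Table

    table = Table(title="Prestartup Times for Custom Nodes")
    table.add_column("Time (s)", justify="right")
    table.add_column("Status")
    table.add_column("Custom Node Path")
    for elapsed, path, ok in sorted(timings, key=lambda x: x[0]):
        table.add_row(f"{elapsed:.1f}", "[green]Success[/green]" if ok else "[red]Failed[/red]", path)
    Console().print(table)
except ImportError:
    for elapsed, path, ok in sorted(timings, key=lambda x: x[0]):
        print("{:6.1f} seconds{}:".format(elapsed, "" if ok else " (PRESTARTUP FAILED)"), path)

Note that only ImportError is caught, so an actual rendering failure inside rich would still surface rather than being silently swallowed.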
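In cuda_malloc_warning(), the renamed warning_needed flag collapses any number of blacklist matches into a single warning. A standalone sketch of the flag pattern; the blacklist entry and device string below are made up:

import logging

blacklist = ["GeForce GTX 9"]                      # made-up blacklist entry
device_name = "GeForce GTX 960 : cudaMallocAsync"  # made-up device string

warning_needed = False
if "cudaMallocAsync" in device_name:
    for b in blacklist:
        if b in device_name:
            warning_needed = True
if warning_needed:
    logging.warning("this card most likely does not support cuda-malloc; run with --disable-cuda-malloc")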
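To make the garbage-collection timeout arithmetic in prompt_worker concrete: the patched line blocks on the queue only for the time remaining until the next scheduled gc pass, clamped at zero so a negative timeout is never passed on. A worked example with illustrative numbers (the interval value is an assumption, not taken from this diff):

import time

gc_collect_interval = 10.0                   # assumed seconds between gc passes
last_gc_collect = time.perf_counter() - 7.5  # pretend the last pass ran 7.5 s ago

# Same expression as the patched line in prompt_worker:
timeout = max(gc_collect_interval - (time.perf_counter() - last_gc_collect), 0.0)
print(f"{timeout:.1f}")  # ~2.5; prints 0.0 whenever a pass is already overdue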