From 6f4d889053b61b378d8d0c1fc1265b5bcfd674a2 Mon Sep 17 00:00:00 2001
From: blob42
Date: Thu, 27 Nov 2025 16:29:38 +0100
Subject: [PATCH] feat: add --total-ram option for controlling visible system
 RAM in Comfy

Adds a new command-line argument `--total-ram` to limit the amount of
system RAM that ComfyUI considers available, allowing users to simulate
lower-memory environments. This makes behavior more predictable when
testing or when running on systems with limited resources.

Rationale: I run Comfy inside a Docker container. Docker's `mem_limit`
caps the container's memory but does not hide the host's total RAM from
Comfy, so its cache-pressure limits are derived from memory that is not
actually available and out-of-memory errors occur frequently. This flag
allows precise control over the RAM Comfy sees.

Signed-off-by: blob42
---
 comfy/cli_args.py         |  1 +
 comfy/model_management.py | 11 +++++++++--
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index d2b60e347eff..fdc31e7e2f99 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -90,6 +90,7 @@ def __call__(self, parser, namespace, values, option_string=None):
 parser.add_argument("--oneapi-device-selector", type=str, default=None, metavar="SELECTOR_STRING", help="Sets the oneAPI device(s) this instance will use.")
 parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize default when loading models with Intel's Extension for Pytorch.")
 parser.add_argument("--supports-fp8-compute", action="store_true", help="ComfyUI will act like if the device supports fp8 compute.")
+parser.add_argument("--total-ram", type=float, default=0, help="Maximum amount of system RAM in GB that ComfyUI considers available (default 0: use all detected RAM).")
 
 class LatentPreviewMethod(enum.Enum):
     NoPreviews = "none"
diff --git a/comfy/model_management.py b/comfy/model_management.py
index a9327ac80091..bc8179410d76 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -192,8 +192,11 @@ def get_total_memory(dev=None, torch_total_too=False):
     if dev is None:
         dev = get_torch_device()
 
     if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
-        mem_total = psutil.virtual_memory().total
+        if args.total_ram != 0:
+            mem_total = args.total_ram * 1024 * 1024 * 1024  # --total-ram is in GB; mem_total is in bytes
+        else:
+            mem_total = psutil.virtual_memory().total
         mem_total_torch = mem_total
     else:
         if directml_enabled:
@@ -236,8 +239,12 @@ def mac_version():
     return None
 
 total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
-total_ram = psutil.virtual_memory().total / (1024 * 1024)
+
+if args.total_ram != 0:
+    total_ram = args.total_ram * 1024  # --total-ram is in GB; total_ram is in MB
+else:
+    total_ram = psutil.virtual_memory().total / (1024 * 1024)
 logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
 
 try:
     logging.info("pytorch version: {}".format(torch_version))
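
Note on units, as a minimal standalone sketch rather than part of the patch
(`visible_ram_bytes` is a hypothetical helper named here only for
illustration): `--total-ram` is parsed as gigabytes, `get_total_memory()`
reports bytes, and the startup log prints megabytes. The sketch below mirrors
that conversion chain.

```python
import psutil

def visible_ram_bytes(total_ram_gb: float) -> int:
    """Hypothetical helper mirroring the patched logic in get_total_memory()."""
    if total_ram_gb != 0:
        # --total-ram is given in GB; internal accounting is in bytes
        return int(total_ram_gb * 1024 * 1024 * 1024)
    # 0 means "no cap": fall back to the RAM psutil detects, already in bytes
    return psutil.virtual_memory().total

# Launching with `--total-ram 8` should make the startup log read
# "total RAM 8192 MB" regardless of the host's physical memory:
print("total RAM {:0.0f} MB".format(visible_ram_bytes(8.0) / (1024 * 1024)))
```

In the Docker scenario from the rationale, this would presumably be paired
with `mem_limit`, setting `--total-ram` at or slightly below the container's
limit so Comfy's cache-pressure heuristics stay inside it.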