From ab73c2d9be65510b570d1959c6ff54b21fbf9731 Mon Sep 17 00:00:00 2001 From: Nick Date: Fri, 26 Dec 2025 11:55:56 +1300 Subject: [PATCH] Optimize Dockerfile layers and reduce disk usage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Combine PyTorch + triton install into single layer - Add pip cache cleanup after each install step - Change SageAttention to regular install and remove source after build - Consolidate custom node dependencies into single layer - Add CLAUDE.md, i2v-workflow.json, update handler.py and PROJECT.md 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- CLAUDE.md | 83 ++ Dockerfile | 28 +- PROJECT.md | 17 + handler.py | 269 ++++-- i2v-workflow.json | 1996 +++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 2306 insertions(+), 87 deletions(-) create mode 100644 CLAUDE.md create mode 100644 i2v-workflow.json diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..e9b9589 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,83 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Project Overview + +ComfyUI-based image-to-video generation service deployed on RunPod Serverless. Accepts base64 images and text prompts via RunPod API, processes them through ComfyUI workflows, and returns generated videos. + +## Architecture + +``` +RunPod API Request → handler.py → ComfyUI Server (port 8188) → GPU Inference → Response + ↓ + Network Volume (/userdata) for models +``` + +**Key flow in handler.py:** +1. `start_comfyui()` - Launches ComfyUI server +2. `upload_image()` - Uploads base64 image to ComfyUI +3. `inject_wan22_params()` - Injects parameters into workflow nodes +4. `queue_workflow()` - Submits to ComfyUI queue +5. `poll_for_completion()` - Polls until done (max 600s) +6. `fetch_output()` - Retrieves generated video as base64 + +## Build Commands + +```bash +# Build Docker image +docker build -t comfyui-runpod:latest . + +# Push to Gitea registry +docker push gitea.voyager.sh/nick/comfyui-serverless:latest +``` + +CI/CD via Gitea Actions triggers on push to `main` branch. 
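+The handler flow listed under Architecture, condensed into one illustrative sketch. The function names are the real ones from `handler.py`; `run_job` itself is hypothetical and skips input validation, optional parameters (resolution, steps, split_step) and error handling:
+
+```python
+# Hypothetical, trimmed-down version of handler.py's handler().
+import uuid
+
+def run_job(job_input: dict) -> list[bytes]:
+    workflow = convert_frontend_to_api(load_default_workflow())  # default Wan22-I2V-Remix workflow
+    if not start_comfyui():                                      # launch ComfyUI server on port 8188
+        raise RuntimeError("ComfyUI failed to start")
+    image_name = upload_image(job_input["image"])                # upload base64 image to ComfyUI
+    workflow = inject_wan22_params(workflow, {
+        "image_filename": image_name,                            # node 148 (LoadImage)
+        "prompt": job_input["prompt"],                           # node 134 (positive prompt)
+    })
+    prompt_id = queue_workflow(workflow, uuid.uuid4().hex)       # submit to ComfyUI queue
+    history = poll_for_completion(prompt_id, 600)                # poll until done (600 s cap)
+    return [fetch_output(o) for o in get_output_files(history)]  # raw video bytes
+```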
+ +## Local Testing + +```bash +docker run --gpus all -p 8188:8188 \ + -v /path/to/models:/userdata/models \ + comfyui-runpod:latest +``` + +## API Input Schema + +```json +{ + "image": "base64 encoded image (required)", + "prompt": "positive prompt (required)", + "negative_prompt": "optional", + "resolution": 720, + "steps": 8, + "split_step": 4, + "timeout": 600 +} +``` + +## Workflow Node Mapping (Wan22-I2V-Remix) + +| Node ID | Purpose | +|---------|---------| +| 148 | LoadImage (input) | +| 134 | CLIPTextEncode (positive prompt) | +| 137 | CLIPTextEncode (negative prompt) | +| 147 | Resolution | +| 150 | Steps | +| 151 | Split Step | +| 117 | SaveVideo (output) | + +## Stack + +- CUDA 12.8.1, Python 3.12, PyTorch 2.8.0+cu128 +- SageAttention 2.2 (compiled from source with `--no-build-isolation`) +- Nunchaku 1.0.2 +- 12 ComfyUI custom nodes (see Dockerfile) + +## Key Considerations + +- Models stored on RunPod Network Volume at `/userdata/models/` +- Cold start ~30-60s for ComfyUI initialization +- Large outputs (>10MB) returned as file paths, not base64 +- Workflow files in `workflows/` directory (API format) diff --git a/Dockerfile b/Dockerfile index 278d289..8b93350 100644 --- a/Dockerfile +++ b/Dockerfile @@ -44,21 +44,21 @@ RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.12 1 & python3.12 -m ensurepip --upgrade && \ python3.12 -m pip install --upgrade pip setuptools wheel -# Install PyTorch 2.8.0+cu128 and triton 3.4.0 +# Install PyTorch 2.8.0+cu128 and triton 3.4.0 in single layer RUN pip install \ torch==2.8.0+cu128 \ torchvision==0.23.0+cu128 \ torchaudio==2.8.0+cu128 \ - --index-url https://download.pytorch.org/whl/cu128 - -RUN pip install triton==3.4.0 + triton==3.4.0 \ + --index-url https://download.pytorch.org/whl/cu128 && \ + rm -rf /root/.cache/pip /tmp/* # Install nunchaku from GitHub wheel RUN pip install https://github.com/nunchaku-tech/nunchaku/releases/download/v1.0.2/nunchaku-1.0.2+torch2.8-cp312-cp312-linux_x86_64.whl # Install key dependencies before SageAttention COPY requirements.txt /tmp/requirements.txt -RUN pip install -r /tmp/requirements.txt +RUN pip install -r /tmp/requirements.txt && rm -rf /root/.cache/pip # Compile SageAttention 2.2 from source with no build isolation WORKDIR /tmp @@ -67,7 +67,8 @@ ENV NVCC_APPEND_FLAGS="--threads 8" ENV MAX_JOBS=32 RUN git clone https://github.com/thu-ml/SageAttention.git && \ cd SageAttention && \ - pip install --no-build-isolation -e . + pip install --no-build-isolation . 
&& \ + cd / && rm -rf /tmp/SageAttention /root/.cache/pip # Clone ComfyUI WORKDIR /workspace @@ -90,13 +91,14 @@ RUN git clone https://github.com/ltdrdata/ComfyUI-Manager.git && \ git clone https://github.com/evanspearman/ComfyMath.git && \ git clone https://github.com/ssitu/ComfyUI_UltimateSDUpscale.git -# Install custom node dependencies -RUN cd ComfyUI-KJNodes && pip install -r requirements.txt || true -RUN cd comfyui_controlnet_aux && pip install -r requirements.txt || true -RUN cd ComfyUI-VideoHelperSuite && pip install -r requirements.txt || true -RUN cd ComfyUI-GGUF && pip install -r requirements.txt || true -RUN cd ComfyUI-Frame-Interpolation && pip install -r requirements.txt || true -RUN cd ComfyUI-nunchaku && pip install -r requirements.txt || true +# Install custom node dependencies (single layer) +RUN (cd ComfyUI-KJNodes && pip install -r requirements.txt || true) && \ + (cd comfyui_controlnet_aux && pip install -r requirements.txt || true) && \ + (cd ComfyUI-VideoHelperSuite && pip install -r requirements.txt || true) && \ + (cd ComfyUI-GGUF && pip install -r requirements.txt || true) && \ + (cd ComfyUI-Frame-Interpolation && pip install -r requirements.txt || true) && \ + (cd ComfyUI-nunchaku && pip install -r requirements.txt || true) && \ + rm -rf /root/.cache/pip /tmp/* # Create directories and symlinks to network volume WORKDIR /workspace/ComfyUI diff --git a/PROJECT.md b/PROJECT.md index b6ae73a..7bd1e7c 100644 --- a/PROJECT.md +++ b/PROJECT.md @@ -157,6 +157,23 @@ pip install --no-build-isolation -e . - Output will likely be video - ensure ffmpeg installed and handler detects output type - Reference pod uses venv - serverless image can install globally +## Workflow Node Mapping (Wan22-I2V-Remix.json) + +Handler must inject values into these nodes: + +| Purpose | Node ID | Field | Default | +|---------|---------|-------|---------| +| Image Input | 148 | widgets_values[0] | filename after upload | +| Positive Prompt | 134 | widgets_values[0] | "" | +| Negative Prompt | 137 | widgets_values[0] | (preset, usually leave) | +| Resolution | 147 | widgets_values[0] | 720 | +| Steps | 150 | widgets_values[0] | 8 | +| Split Step | 151 | widgets_values[0] | 4 | + +Output: Node 117 (SaveVideo) saves to `video/ComfyUI` prefix with h264 codec. + +Handler retrieves video from `/workspace/ComfyUI/output/video/` after workflow completes. + ## Claude Code Init Command ``` Read PROJECT.md fully. Build the Dockerfile first, matching the reference environment exactly: CUDA 12.8.1, Python 3.12, PyTorch 2.8.0+cu128, triton 3.4.0. Install nunchaku from the GitHub wheel URL. Compile SageAttention 2.2 with --no-build-isolation. Install all custom nodes listed. Symlink model paths to /userdata. Do not use a venv in the container. 
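+As a hedged illustration of how this table relates to what the handler actually patches: the table's `widgets_values[0]` refers to the frontend JSON, but `handler.py` first converts the workflow to API format (`convert_frontend_to_api`) and then overwrites the corresponding `inputs` field (`inject_wan22_params`). The prompt text below is a made-up example; node and link IDs come from Wan22-I2V-Remix:
+
+```python
+# Frontend format (what the table's widgets_values[0] points at), node 134:
+frontend_node = {"id": 134, "type": "CLIPTextEncode", "widgets_values": [""]}
+
+# API format after conversion - the handler injects the positive prompt here:
+api_node = {
+    "134": {
+        "class_type": "CLIPTextEncode",
+        "inputs": {
+            "text": "a person walking along a beach at sunset",  # injected prompt (example value)
+            "clip": ["133", 0],                                   # connection from CLIPLoader node 133
+        },
+    }
+}
+```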
diff --git a/handler.py b/handler.py index ac6d9f3..f427f24 100644 --- a/handler.py +++ b/handler.py @@ -1,6 +1,15 @@ """ ComfyUI RunPod Serverless Handler Handles image/video generation workflows with ComfyUI API + +Wan22-I2V-Remix Workflow Node Mapping: +- Node 148: LoadImage - image input +- Node 134: CLIPTextEncode - positive prompt +- Node 137: CLIPTextEncode - negative prompt +- Node 147: easy int - resolution (720 default) +- Node 150: INTConstant - steps (8 default) +- Node 151: INTConstant - split_step (4 default) +- Node 117: SaveVideo - output """ import os @@ -13,7 +22,6 @@ import subprocess import signal import requests from pathlib import Path -from urllib.parse import urljoin import runpod # Configuration @@ -23,6 +31,16 @@ COMFYUI_HOST = f"http://127.0.0.1:{COMFYUI_PORT}" MAX_TIMEOUT = 600 # 10 minutes max for video generation POLL_INTERVAL = 1.0 STARTUP_TIMEOUT = 120 +DEFAULT_WORKFLOW_PATH = "/workspace/workflows/Wan22-I2V-Remix.json" + +# Wan22-I2V-Remix node IDs +NODE_IMAGE_INPUT = "148" +NODE_POSITIVE_PROMPT = "134" +NODE_NEGATIVE_PROMPT = "137" +NODE_RESOLUTION = "147" +NODE_STEPS = "150" +NODE_SPLIT_STEP = "151" +NODE_SAVE_VIDEO = "117" # Global ComfyUI process comfyui_process = None @@ -78,6 +96,91 @@ def stop_comfyui(): comfyui_process = None +def load_default_workflow() -> dict: + """Load the default Wan22-I2V-Remix workflow.""" + workflow_path = Path(DEFAULT_WORKFLOW_PATH) + if not workflow_path.exists(): + raise FileNotFoundError(f"Default workflow not found: {DEFAULT_WORKFLOW_PATH}") + + with open(workflow_path) as f: + return json.load(f) + + +def convert_frontend_to_api(frontend_workflow: dict) -> dict: + """Convert ComfyUI frontend format to API format.""" + # If already in API format (no 'nodes' key), return as-is + if "nodes" not in frontend_workflow: + return frontend_workflow + + api_workflow = {} + nodes = frontend_workflow.get("nodes", []) + links = frontend_workflow.get("links", []) + + # Build link lookup: link_id -> (source_node_id, source_slot) + link_map = {} + for link in links: + link_id, src_node, src_slot, dst_node, dst_slot, link_type = link[:6] + link_map[link_id] = (str(src_node), src_slot) + + for node in nodes: + node_id = str(node["id"]) + class_type = node.get("type", "") + + inputs = {} + + # Process widget values + widgets_values = node.get("widgets_values", []) + + # Map widget values based on class type + # This is a simplified mapping - specific nodes may need custom handling + if class_type == "LoadImage" and len(widgets_values) >= 1: + inputs["image"] = widgets_values[0] + if len(widgets_values) >= 2: + inputs["upload"] = widgets_values[1] + + elif class_type == "CLIPTextEncode" and len(widgets_values) >= 1: + inputs["text"] = widgets_values[0] + + elif class_type in ["easy int", "INTConstant"] and len(widgets_values) >= 1: + inputs["value"] = widgets_values[0] + + elif class_type == "SaveVideo" and len(widgets_values) >= 1: + inputs["filename_prefix"] = widgets_values[0] + if len(widgets_values) >= 2: + inputs["format"] = widgets_values[1] + if len(widgets_values) >= 3: + inputs["codec"] = widgets_values[2] + + elif class_type == "CreateVideo" and len(widgets_values) >= 1: + inputs["frame_rate"] = widgets_values[0] + + elif class_type == "RIFE VFI" and len(widgets_values) >= 1: + inputs["ckpt_name"] = widgets_values[0] + if len(widgets_values) >= 2: + inputs["clear_cache_after_n_frames"] = widgets_values[1] + if len(widgets_values) >= 3: + inputs["multiplier"] = widgets_values[2] + + # Process node inputs (connections) + for inp 
in node.get("inputs", []): + inp_name = inp["name"] + link_id = inp.get("link") + if link_id is not None and link_id in link_map: + src_node, src_slot = link_map[link_id] + inputs[inp_name] = [src_node, src_slot] + + api_workflow[node_id] = { + "class_type": class_type, + "inputs": inputs + } + + # Add meta if title exists + if "title" in node: + api_workflow[node_id]["_meta"] = {"title": node["title"]} + + return api_workflow + + def upload_image(image_base64: str, filename: str = None) -> str: """Upload base64 image to ComfyUI and return the filename.""" if filename is None: @@ -107,61 +210,33 @@ def upload_image(image_base64: str, filename: str = None) -> str: return result.get("name", filename) -def inject_prompt_into_workflow(workflow: dict, prompt: str, prompt_node_id: str = None) -> dict: - """Inject prompt text into workflow at specified node or auto-detect.""" - workflow = workflow.copy() +def inject_wan22_params(workflow: dict, params: dict) -> dict: + """Inject parameters into Wan22-I2V-Remix workflow nodes.""" + workflow = json.loads(json.dumps(workflow)) # Deep copy - # If specific node ID provided, use it - if prompt_node_id and prompt_node_id in workflow: - node = workflow[prompt_node_id] - if "inputs" in node: - # Common prompt input field names - for field in ["text", "prompt", "positive", "string"]: - if field in node["inputs"]: - node["inputs"][field] = prompt - return workflow + # Image input (node 148) + if "image_filename" in params and NODE_IMAGE_INPUT in workflow: + workflow[NODE_IMAGE_INPUT]["inputs"]["image"] = params["image_filename"] - # Auto-detect: find nodes that look like text/prompt inputs - prompt_node_types = [ - "CLIPTextEncode", - "CLIPTextEncodeSDXL", - "Text Multiline", - "String", - "TextInput" - ] + # Positive prompt (node 134) + if "prompt" in params and NODE_POSITIVE_PROMPT in workflow: + workflow[NODE_POSITIVE_PROMPT]["inputs"]["text"] = params["prompt"] - for node_id, node in workflow.items(): - class_type = node.get("class_type", "") - if class_type in prompt_node_types: - if "inputs" in node: - for field in ["text", "prompt", "positive", "string"]: - if field in node["inputs"]: - # Only inject into positive prompts, skip negative - if "negative" not in node.get("_meta", {}).get("title", "").lower(): - node["inputs"][field] = prompt - return workflow + # Negative prompt (node 137) - optional override + if "negative_prompt" in params and NODE_NEGATIVE_PROMPT in workflow: + workflow[NODE_NEGATIVE_PROMPT]["inputs"]["text"] = params["negative_prompt"] - return workflow + # Resolution (node 147) + if "resolution" in params and NODE_RESOLUTION in workflow: + workflow[NODE_RESOLUTION]["inputs"]["value"] = params["resolution"] + # Steps (node 150) + if "steps" in params and NODE_STEPS in workflow: + workflow[NODE_STEPS]["inputs"]["value"] = params["steps"] -def inject_image_into_workflow(workflow: dict, image_filename: str, image_node_id: str = None) -> dict: - """Inject uploaded image filename into workflow.""" - workflow = workflow.copy() - - # If specific node ID provided, use it - if image_node_id and image_node_id in workflow: - node = workflow[image_node_id] - if "inputs" in node: - node["inputs"]["image"] = image_filename - return workflow - - # Auto-detect: find LoadImage nodes - for node_id, node in workflow.items(): - class_type = node.get("class_type", "") - if class_type in ["LoadImage", "LoadImageFromPath"]: - if "inputs" in node: - node["inputs"]["image"] = image_filename - return workflow + # Split step (node 151) + if "split_step" in 
params and NODE_SPLIT_STEP in workflow: + workflow[NODE_SPLIT_STEP]["inputs"]["value"] = params["split_step"] return workflow @@ -237,7 +312,17 @@ def get_output_files(history: dict) -> list: "type_folder": img.get("type", "output") }) - # Handle video outputs (VideoHelperSuite and similar) + # Handle video outputs (SaveVideo node) + if "videos" in node_output: + for vid in node_output["videos"]: + outputs.append({ + "type": "video", + "filename": vid["filename"], + "subfolder": vid.get("subfolder", ""), + "type_folder": vid.get("type", "output") + }) + + # Handle video outputs (VideoHelperSuite gifs) if "gifs" in node_output: for vid in node_output["gifs"]: outputs.append({ @@ -280,46 +365,79 @@ def fetch_output(output_info: dict) -> bytes: def handler(job: dict) -> dict: - """RunPod serverless handler.""" + """ + RunPod serverless handler. + + Input schema: + { + "image": "base64 encoded image (required)", + "prompt": "positive prompt text (required)", + "negative_prompt": "negative prompt (optional)", + "resolution": 720 (optional, default 720), + "steps": 8 (optional, default 8), + "split_step": 4 (optional, default 4), + "timeout": 600 (optional, max 600), + "workflow": {} (optional, override default workflow) + } + """ job_input = job.get("input", {}) - # Validate input - workflow = job_input.get("workflow") - if not workflow: - return {"error": "Missing 'workflow' in input"} + # Validate required inputs + if "image" not in job_input or not job_input["image"]: + return {"error": "Missing required 'image' (base64) in input"} + + if "prompt" not in job_input or not job_input["prompt"]: + return {"error": "Missing required 'prompt' in input"} # Ensure ComfyUI is running if not start_comfyui(): return {"error": "Failed to start ComfyUI server"} try: - # Handle image upload if provided - if "image" in job_input and job_input["image"]: - image_filename = upload_image( - job_input["image"], - job_input.get("image_filename") - ) - workflow = inject_image_into_workflow( - workflow, - image_filename, - job_input.get("image_node_id") - ) + # Load workflow (custom or default) + if "workflow" in job_input and job_input["workflow"]: + workflow = job_input["workflow"] + # Convert frontend format if needed + workflow = convert_frontend_to_api(workflow) + else: + # Load and convert default workflow + frontend_workflow = load_default_workflow() + workflow = convert_frontend_to_api(frontend_workflow) - # Handle prompt injection if provided - if "prompt" in job_input and job_input["prompt"]: - workflow = inject_prompt_into_workflow( - workflow, - job_input["prompt"], - job_input.get("prompt_node_id") - ) + # Upload image + image_filename = upload_image(job_input["image"]) + print(f"Uploaded image: {image_filename}") + + # Build params for injection + params = { + "image_filename": image_filename, + "prompt": job_input["prompt"] + } + + if "negative_prompt" in job_input: + params["negative_prompt"] = job_input["negative_prompt"] + + if "resolution" in job_input: + params["resolution"] = int(job_input["resolution"]) + + if "steps" in job_input: + params["steps"] = int(job_input["steps"]) + + if "split_step" in job_input: + params["split_step"] = int(job_input["split_step"]) + + # Inject parameters into workflow + workflow = inject_wan22_params(workflow, params) # Queue workflow client_id = uuid.uuid4().hex prompt_id = queue_workflow(workflow, client_id) + print(f"Queued workflow: {prompt_id}") # Poll for completion timeout = min(job_input.get("timeout", MAX_TIMEOUT), MAX_TIMEOUT) history = 
poll_for_completion(prompt_id, timeout) + print("Workflow completed") # Get output files outputs = get_output_files(history) @@ -331,6 +449,7 @@ def handler(job: dict) -> dict: results = [] for output_info in outputs: data = fetch_output(output_info) + print(f"Fetched output: {output_info['filename']} ({len(data)} bytes)") # Check size for video files if output_info["type"] == "video" and len(data) > 10 * 1024 * 1024: @@ -362,6 +481,8 @@ def handler(job: dict) -> dict: except TimeoutError as e: return {"error": str(e), "status": "timeout"} except Exception as e: + import traceback + traceback.print_exc() return {"error": str(e), "status": "error"} diff --git a/i2v-workflow.json b/i2v-workflow.json new file mode 100644 index 0000000..a6558e3 --- /dev/null +++ b/i2v-workflow.json @@ -0,0 +1,1996 @@ +{ + "id": "2f0ba6fa-93b6-4799-9aaf-940a7eb0324e", + "revision": 0, + "last_node_id": 158, + "last_link_id": 333, + "nodes": [ + { + "id": 127, + "type": "WanVideoTorchCompileSettings", + "pos": [ + 159.40287714667224, + -1769.6148988489665 + ], + "size": [ + 392.0241540699825, + 250 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "torch_compile_args", + "type": "WANCOMPILEARGS", + "slot_index": 0, + "links": [ + 263, + 266 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "d9b1f4d1a5aea91d101ae97a54714a5861af3f50", + "Node name for S&R": "WanVideoTorchCompileSettings" + }, + "widgets_values": [ + "inductor", + false, + "default", + false, + 64, + true, + 128, + false, + false + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 128, + "type": "WanVideoBlockSwap", + "pos": [ + 152.7926547690492, + -1466.1654826646356 + ], + "size": [ + 395.13919061084084, + 231.29535141680748 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "block_swap_args", + "type": "BLOCKSWAPARGS", + "links": [ + 264, + 267 + ] + } + ], + "properties": { + "cnr_id": "comfyui-wanvideowrapper", + "ver": "1.3.9", + "Node name for S&R": "WanVideoBlockSwap" + }, + "widgets_values": [ + 20, + false, + false, + false, + 0, + 0, + false + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 116, + "type": "CreateVideo", + "pos": [ + 3521.2690680376036, + -2208.7224312428657 + ], + "size": [ + 347.71359350645434, + 88.08681191129648 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 242 + }, + { + "name": "audio", + "shape": 7, + "type": "AUDIO", + "link": null + } + ], + "outputs": [ + { + "name": "VIDEO", + "type": "VIDEO", + "links": [ + 243 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.45", + "Node name for S&R": "CreateVideo" + }, + "widgets_values": [ + 32 + ] + }, + { + "id": 117, + "type": "SaveVideo", + "pos": [ + 3128.0650857702185, + -1948.1311289189398 + ], + "size": [ + 801.3445639023003, + 649.900524985027 + ], + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "video", + "type": "VIDEO", + "link": 243 + } + ], + "outputs": [], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.45", + "Node name for S&R": "SaveVideo" + }, + "widgets_values": [ + "video/ComfyUI", + "auto", + "h264" + ] + }, + { + "id": 150, + "type": "INTConstant", + "pos": [ + 2191.0518131842623, + -2123.0850788511125 + ], + "size": [ + 210, + 58 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "value", + "type": "INT", + "links": [ + 299, + 300 + ] + } + ], + "title": 
"Steps", + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "a6b867b63a29ca48ddb15c589e17a9f2d8530d57", + "Node name for S&R": "INTConstant", + "ue_properties": { + "version": "7.1", + "widget_ue_connectable": {}, + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 8 + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 151, + "type": "INTConstant", + "pos": [ + 2196.7205746726668, + -2002.552205948389 + ], + "size": [ + 210, + 58 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "value", + "type": "INT", + "links": [ + 301, + 302 + ] + } + ], + "title": "Split_step", + "properties": { + "cnr_id": "comfyui-kjnodes", + "ver": "a6b867b63a29ca48ddb15c589e17a9f2d8530d57", + "Node name for S&R": "INTConstant", + "ue_properties": { + "version": "7.1", + "widget_ue_connectable": {}, + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + 4 + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 133, + "type": "CLIPLoader", + "pos": [ + 1068.9605865919011, + -1493.8617600520472 + ], + "size": [ + 473.63328946919273, + 106 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "slot_index": 0, + "links": [ + 269, + 271 + ] + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.45", + "Node name for S&R": "CLIPLoader", + "models": [ + { + "name": "umt5_xxl_fp8_e4m3fn_scaled.safetensors", + "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors", + "directory": "text_encoders" + } + ] + }, + "widgets_values": [ + "nsfw_wan_umt5-xxl_fp8_scaled.safetensors", + "wan", + "default" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 137, + "type": "CLIPTextEncode", + "pos": [ + 1646.314370939116, + -2130.5628322834104 + ], + "size": [ + 425.27801513671875, + 180.6060791015625 + ], + "flags": { + "collapsed": false + }, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 271 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 325 + ] + } + ], + "title": "CLIP Text Encode (Negative Prompt)", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.45", + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "low quality, lowres, bad hands, extra limbs, missing fingers, poorly drawn face, bad anatomy, blurred, jpeg artifacts, deformed, ugly, bad proportions, disfigured, watermark, text, logo, signature, unrealistic eyes, cross-eyed, lopsided, bad lighting, harsh shadows, flat shading, unshapely body, pixelated, duplicate limbs, bad perspective, morphed, distorted, glitch, malformed hands, distorted fingers, noisy background, overly saturated, unnatural colors, lens distortion, grainy, low detail," + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 132, + "type": "WanVideoModelLoader", + "pos": [ + 1062.6700769132297, + -1881.564452504654 + ], + "size": [ + 477.4410095214844, + 338 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "compile_args", + "shape": 7, + "type": "WANCOMPILEARGS", + "link": 266 + }, + { + "name": "block_swap_args", + "shape": 7, + "type": "BLOCKSWAPARGS", + "link": 267 + }, + { + "name": "lora", + "shape": 7, + "type": "WANVIDLORA", + "link": 310 + }, + { + "name": "vram_management_args", + "shape": 7, + "type": "VRAM_MANAGEMENTARGS", + "link": null + }, + { + "name": 
"extra_model", + "shape": 7, + "type": "VACEPATH", + "link": null + }, + { + "name": "fantasytalking_model", + "shape": 7, + "type": "FANTASYTALKINGMODEL", + "link": null + }, + { + "name": "multitalk_model", + "shape": 7, + "type": "MULTITALKMODEL", + "link": null + }, + { + "name": "fantasyportrait_model", + "shape": 7, + "type": "FANTASYPORTRAITMODEL", + "link": null + }, + { + "name": "vace_model", + "shape": 7, + "type": "VACEPATH", + "link": null + } + ], + "outputs": [ + { + "name": "model", + "type": "WANVIDEOMODEL", + "slot_index": 0, + "links": [ + 276 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "d9b1f4d1a5aea91d101ae97a54714a5861af3f50", + "Node name for S&R": "WanVideoModelLoader" + }, + "widgets_values": [ + "Wan2.2_Remix_NSFW_i2v_14b_low_lighting_v2.0.safetensors", + "fp16_fast", + "disabled", + "offload_device", + "sageattn", + "default" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 131, + "type": "WanVideoModelLoader", + "pos": [ + 1060.5491719600273, + -2271.0879504278537 + ], + "size": [ + 477.4410095214844, + 338 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "compile_args", + "shape": 7, + "type": "WANCOMPILEARGS", + "link": 263 + }, + { + "name": "block_swap_args", + "shape": 7, + "type": "BLOCKSWAPARGS", + "link": 264 + }, + { + "name": "lora", + "shape": 7, + "type": "WANVIDLORA", + "link": 309 + }, + { + "name": "vram_management_args", + "shape": 7, + "type": "VRAM_MANAGEMENTARGS", + "link": null + }, + { + "name": "extra_model", + "shape": 7, + "type": "VACEPATH", + "link": null + }, + { + "name": "fantasytalking_model", + "shape": 7, + "type": "FANTASYTALKINGMODEL", + "link": null + }, + { + "name": "multitalk_model", + "shape": 7, + "type": "MULTITALKMODEL", + "link": null + }, + { + "name": "fantasyportrait_model", + "shape": 7, + "type": "FANTASYPORTRAITMODEL", + "link": null + }, + { + "name": "vace_model", + "shape": 7, + "type": "VACEPATH", + "link": null + } + ], + "outputs": [ + { + "name": "model", + "type": "WANVIDEOMODEL", + "slot_index": 0, + "links": [ + 275 + ] + } + ], + "properties": { + "cnr_id": "ComfyUI-WanVideoWrapper", + "ver": "d9b1f4d1a5aea91d101ae97a54714a5861af3f50", + "Node name for S&R": "WanVideoModelLoader" + }, + "widgets_values": [ + "Wan2.2_Remix_NSFW_i2v_14b_high_lighting_v2.0.safetensors", + "fp16_fast", + "disabled", + "offload_device", + "sageattn", + "default" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 130, + "type": "WanVideoLoraSelect", + "pos": [ + 606.415170671469, + -1743.2624190518252 + ], + "size": [ + 396.89430292841746, + 200 + ], + "flags": {}, + "order": 5, + "mode": 4, + "inputs": [ + { + "name": "prev_lora", + "shape": 7, + "type": "WANVIDLORA", + "link": null + }, + { + "name": "blocks", + "shape": 7, + "type": "SELECTEDBLOCKS", + "link": null + } + ], + "outputs": [ + { + "name": "lora", + "type": "WANVIDLORA", + "links": [ + 309 + ] + } + ], + "properties": { + "cnr_id": "comfyui-wanvideowrapper", + "ver": "1.3.9", + "Node name for S&R": "WanVideoLoraSelect" + }, + "widgets_values": [ + "Wan2.2-Lightning_I2V-A14B-4steps-lora_HIGH_fp16.safetensors", + 1, + false, + false + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 129, + "type": "WanVideoLoraSelect", + "pos": [ + 602.6211059276344, + -1491.750075696995 + ], + "size": [ + 396.89430292841746, + 200 + ], + "flags": {}, + "order": 6, + "mode": 4, + "inputs": [ + { + "name": "prev_lora", + "shape": 7, + "type": "WANVIDLORA", + "link": null + }, + { + 
"name": "blocks", + "shape": 7, + "type": "SELECTEDBLOCKS", + "link": null + } + ], + "outputs": [ + { + "name": "lora", + "type": "WANVIDLORA", + "links": [ + 310 + ] + } + ], + "properties": { + "cnr_id": "comfyui-wanvideowrapper", + "ver": "1.3.9", + "Node name for S&R": "WanVideoLoraSelect" + }, + "widgets_values": [ + "Wan2.2-Lightning_I2V-A14B-4steps-lora_LOW_fp16.safetensors", + 1, + false, + false + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 139, + "type": "WanVideoSampler", + "pos": [ + 2485.0131852024583, + -2236.6527625890203 + ], + "size": [ + 267.80859375, + 889.0211057504168 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "WANVIDEOMODEL", + "link": 275 + }, + { + "name": "image_embeds", + "type": "WANVIDIMAGE_EMBEDS", + "link": 328 + }, + { + "name": "text_embeds", + "shape": 7, + "type": "WANVIDEOTEXTEMBEDS", + "link": 277 + }, + { + "name": "samples", + "shape": 7, + "type": "LATENT", + "link": null + }, + { + "name": "feta_args", + "shape": 7, + "type": "FETAARGS", + "link": null + }, + { + "name": "context_options", + "shape": 7, + "type": "WANVIDCONTEXT", + "link": null + }, + { + "name": "cache_args", + "shape": 7, + "type": "CACHEARGS", + "link": null + }, + { + "name": "flowedit_args", + "shape": 7, + "type": "FLOWEDITARGS", + "link": null + }, + { + "name": "slg_args", + "shape": 7, + "type": "SLGARGS", + "link": null + }, + { + "name": "loop_args", + "shape": 7, + "type": "LOOPARGS", + "link": null + }, + { + "name": "experimental_args", + "shape": 7, + "type": "EXPERIMENTALARGS", + "link": null + }, + { + "name": "sigmas", + "shape": 7, + "type": "SIGMAS", + "link": null + }, + { + "name": "unianimate_poses", + "shape": 7, + "type": "UNIANIMATE_POSE", + "link": null + }, + { + "name": "fantasytalking_embeds", + "shape": 7, + "type": "FANTASYTALKING_EMBEDS", + "link": null + }, + { + "name": "uni3c_embeds", + "shape": 7, + "type": "UNI3C_EMBEDS", + "link": null + }, + { + "name": "multitalk_embeds", + "shape": 7, + "type": "MULTITALK_EMBEDS", + "link": null + }, + { + "name": "freeinit_args", + "shape": 7, + "type": "FREEINITARGS", + "link": null + }, + { + "name": "steps", + "type": "INT", + "widget": { + "name": "steps" + }, + "link": 299 + }, + { + "name": "end_step", + "shape": 7, + "type": "INT", + "widget": { + "name": "end_step" + }, + "link": 301 + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 274 + ] + }, + { + "name": "denoised_samples", + "type": "LATENT", + "links": null + } + ], + "properties": { + "cnr_id": "comfyui-wanvideowrapper", + "ver": "1.3.9", + "Node name for S&R": "WanVideoSampler" + }, + "widgets_values": [ + 4, + 1, + 8, + 246405119682734, + "randomize", + true, + "euler", + 0, + 1, + false, + "comfy", + 0, + 2, + true + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 138, + "type": "WanVideoTextEmbedBridge", + "pos": [ + 2188.739218043352, + -2303.268193668137 + ], + "size": [ + 222.0025390625, + 46 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "link": 324 + }, + { + "name": "negative", + "shape": 7, + "type": "CONDITIONING", + "link": 325 + } + ], + "outputs": [ + { + "name": "text_embeds", + "type": "WANVIDEOTEXTEMBEDS", + "links": [ + 277, + 278 + ] + } + ], + "properties": { + "cnr_id": "comfyui-wanvideowrapper", + "ver": "1.3.9", + "Node name for S&R": "WanVideoTextEmbedBridge" + }, + "widgets_values": [] + }, + { + "id": 157, + "type": "WanVideoVAELoader", 
+ "pos": [ + 1077.9028462123924, + -1329.4659494458456 + ], + "size": [ + 461.7992150216228, + 102.1893910549079 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "compile_args", + "shape": 7, + "type": "WANCOMPILEARGS", + "link": null + } + ], + "outputs": [ + { + "name": "vae", + "type": "WANVAE", + "links": [ + 323, + 331 + ] + } + ], + "properties": { + "cnr_id": "comfyui-wanvideowrapper", + "ver": "1.3.9", + "Node name for S&R": "WanVideoVAELoader" + }, + "widgets_values": [ + "wan_2.1_vae.safetensors", + "bf16" + ] + }, + { + "id": 140, + "type": "WanVideoSampler", + "pos": [ + 2779.188007763731, + -2237.900732473446 + ], + "size": [ + 267.80859375, + 891.9053044725463 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "WANVIDEOMODEL", + "link": 276 + }, + { + "name": "image_embeds", + "type": "WANVIDIMAGE_EMBEDS", + "link": 329 + }, + { + "name": "text_embeds", + "shape": 7, + "type": "WANVIDEOTEXTEMBEDS", + "link": 278 + }, + { + "name": "samples", + "shape": 7, + "type": "LATENT", + "link": 274 + }, + { + "name": "feta_args", + "shape": 7, + "type": "FETAARGS", + "link": null + }, + { + "name": "context_options", + "shape": 7, + "type": "WANVIDCONTEXT", + "link": null + }, + { + "name": "cache_args", + "shape": 7, + "type": "CACHEARGS", + "link": null + }, + { + "name": "flowedit_args", + "shape": 7, + "type": "FLOWEDITARGS", + "link": null + }, + { + "name": "slg_args", + "shape": 7, + "type": "SLGARGS", + "link": null + }, + { + "name": "loop_args", + "shape": 7, + "type": "LOOPARGS", + "link": null + }, + { + "name": "experimental_args", + "shape": 7, + "type": "EXPERIMENTALARGS", + "link": null + }, + { + "name": "sigmas", + "shape": 7, + "type": "SIGMAS", + "link": null + }, + { + "name": "unianimate_poses", + "shape": 7, + "type": "UNIANIMATE_POSE", + "link": null + }, + { + "name": "fantasytalking_embeds", + "shape": 7, + "type": "FANTASYTALKING_EMBEDS", + "link": null + }, + { + "name": "uni3c_embeds", + "shape": 7, + "type": "UNI3C_EMBEDS", + "link": null + }, + { + "name": "multitalk_embeds", + "shape": 7, + "type": "MULTITALK_EMBEDS", + "link": null + }, + { + "name": "freeinit_args", + "shape": 7, + "type": "FREEINITARGS", + "link": null + }, + { + "name": "steps", + "type": "INT", + "widget": { + "name": "steps" + }, + "link": 300 + }, + { + "name": "start_step", + "shape": 7, + "type": "INT", + "widget": { + "name": "start_step" + }, + "link": 302 + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 332 + ] + }, + { + "name": "denoised_samples", + "type": "LATENT", + "links": [] + } + ], + "properties": { + "cnr_id": "comfyui-wanvideowrapper", + "ver": "1.3.9", + "Node name for S&R": "WanVideoSampler" + }, + "widgets_values": [ + 4, + 1, + 8, + 941824044416901, + "fixed", + true, + "euler", + 0, + 1, + false, + "comfy", + 2, + 10000, + false + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 115, + "type": "RIFE VFI", + "pos": [ + 3117.608530800124, + -2208.210472112985 + ], + "size": [ + 349.6578063964844, + 198 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "frames", + "type": "IMAGE", + "link": 333 + }, + { + "name": "optional_interpolation_states", + "shape": 7, + "type": "INTERPOLATION_STATES", + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 242 + ] + } + ], + "properties": { + "cnr_id": "comfyui-frame-interpolation", + "ver": "1.0.7", + "Node name for S&R": "RIFE VFI" + }, + 
"widgets_values": [ + "rife47.pth", + 50, + 2, + true, + true, + 1 + ] + }, + { + "id": 156, + "type": "WanVideoImageToVideoEncode", + "pos": [ + 2114.9033867599255, + -1629.4226165473308 + ], + "size": [ + 308.23203125, + 390 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "vae", + "shape": 7, + "type": "WANVAE", + "link": 323 + }, + { + "name": "clip_embeds", + "shape": 7, + "type": "WANVIDIMAGE_CLIPEMBEDS", + "link": null + }, + { + "name": "start_image", + "shape": 7, + "type": "IMAGE", + "link": 330 + }, + { + "name": "end_image", + "shape": 7, + "type": "IMAGE", + "link": null + }, + { + "name": "control_embeds", + "shape": 7, + "type": "WANVIDIMAGE_EMBEDS", + "link": null + }, + { + "name": "temporal_mask", + "shape": 7, + "type": "MASK", + "link": null + }, + { + "name": "extra_latents", + "shape": 7, + "type": "LATENT", + "link": null + }, + { + "name": "add_cond_latents", + "shape": 7, + "type": "ADD_COND_LATENTS", + "link": null + }, + { + "name": "width", + "type": "INT", + "widget": { + "name": "width" + }, + "link": 326 + }, + { + "name": "height", + "type": "INT", + "widget": { + "name": "height" + }, + "link": 327 + } + ], + "outputs": [ + { + "name": "image_embeds", + "type": "WANVIDIMAGE_EMBEDS", + "links": [ + 328, + 329 + ] + } + ], + "properties": { + "cnr_id": "comfyui-wanvideowrapper", + "ver": "1.3.9", + "Node name for S&R": "WanVideoImageToVideoEncode" + }, + "widgets_values": [ + 832, + 480, + 65, + 0, + 1, + 1, + true, + false, + false + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 145, + "type": "MathExpression|pysssss", + "pos": [ + 1885.943053745727, + -1420.3096672282898 + ], + "size": [ + 210, + 154.56871032714844 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "a", + "shape": 7, + "type": "INT,FLOAT,IMAGE,LATENT", + "link": 287 + }, + { + "name": "b", + "shape": 7, + "type": "INT,FLOAT,IMAGE,LATENT", + "link": 288 + }, + { + "name": "c", + "shape": 7, + "type": "INT,FLOAT,IMAGE,LATENT", + "link": 289 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 327 + ] + }, + { + "name": "FLOAT", + "type": "FLOAT", + "links": null + } + ], + "title": "Height Calculation", + "properties": { + "cnr_id": "comfyui-custom-scripts", + "ver": "1.2.5", + "Node name for S&R": "MathExpression|pysssss" + }, + "widgets_values": [ + "(round(b / min(a,b) * c) // 16) * 16" + ] + }, + { + "id": 158, + "type": "WanVideoDecode", + "pos": [ + 2939.221257509142, + -2392.0103457137147 + ], + "size": [ + 270, + 198 + ], + "flags": { + "collapsed": true + }, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "WANVAE", + "link": 331 + }, + { + "name": "samples", + "type": "LATENT", + "link": 332 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 333 + ] + } + ], + "properties": { + "cnr_id": "comfyui-wanvideowrapper", + "ver": "1.3.9", + "Node name for S&R": "WanVideoDecode" + }, + "widgets_values": [ + false, + 272, + 272, + 144, + 128, + "default" + ] + }, + { + "id": 143, + "type": "GetImageSize", + "pos": [ + 1652.938059075249, + -1621.5154346398244 + ], + "size": [ + 210, + 136 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 292 + } + ], + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 284, + 287 + ] + }, + { + "name": "height", + "type": "INT", + "links": [ + 285, + 288 + ] + }, + { + "name": "batch_size", + "type": "INT", + "links": null + } + ], + 
"properties": { + "cnr_id": "comfy-core", + "ver": "0.3.61", + "Node name for S&R": "GetImageSize" + }, + "widgets_values": [] + }, + { + "id": 144, + "type": "MathExpression|pysssss", + "pos": [ + 1887.2120760318778, + -1625.0738914605 + ], + "size": [ + 210, + 154.56871032714844 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "a", + "shape": 7, + "type": "INT,FLOAT,IMAGE,LATENT", + "link": 284 + }, + { + "name": "b", + "shape": 7, + "type": "INT,FLOAT,IMAGE,LATENT", + "link": 285 + }, + { + "name": "c", + "shape": 7, + "type": "INT,FLOAT,IMAGE,LATENT", + "link": 286 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 326 + ] + }, + { + "name": "FLOAT", + "type": "FLOAT", + "links": null + } + ], + "title": "Width Calculation", + "properties": { + "cnr_id": "comfyui-custom-scripts", + "ver": "1.2.5", + "Node name for S&R": "MathExpression|pysssss" + }, + "widgets_values": [ + "(round(a / min(a,b) * c) // 16) * 16" + ] + }, + { + "id": 147, + "type": "easy int", + "pos": [ + 1690.6162147854718, + -1763.1372085043379 + ], + "size": [ + 701.092286378002, + 58 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "int", + "type": "INT", + "links": [ + 286, + 289 + ] + } + ], + "title": "Set Resolution", + "properties": { + "cnr_id": "comfyui-easy-use", + "ver": "1.3.4", + "Node name for S&R": "easy int" + }, + "widgets_values": [ + 720 + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 148, + "type": "LoadImage", + "pos": [ + 229.36172447375245, + -2295.29616043407 + ], + "size": [ + 724.1658278243838, + 326 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "slot_index": 0, + "links": [ + 292, + 330 + ] + }, + { + "name": "MASK", + "type": "MASK", + "slot_index": 1, + "links": null + } + ], + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.45", + "Node name for S&R": "LoadImage", + "ue_properties": { + "widget_ue_connectable": {}, + "version": "7.1", + "input_ue_unconnectable": {} + } + }, + "widgets_values": [ + "ComfyUI_00006_.png", + "image" + ] + }, + { + "id": 134, + "type": "CLIPTextEncode", + "pos": [ + 1638.3661117360386, + -2393.065065199377 + ], + "size": [ + 435.66263809468705, + 212.73511777393867 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 269 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "slot_index": 0, + "links": [ + 324 + ] + } + ], + "title": "CLIP Text Encode (Positive Prompt)", + "properties": { + "cnr_id": "comfy-core", + "ver": "0.3.45", + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "" + ], + "color": "#232", + "bgcolor": "#353" + } + ], + "links": [ + [ + 242, + 115, + 0, + 116, + 0, + "IMAGE" + ], + [ + 243, + 116, + 0, + 117, + 0, + "VIDEO" + ], + [ + 263, + 127, + 0, + 131, + 0, + "WANCOMPILEARGS" + ], + [ + 264, + 128, + 0, + 131, + 1, + "BLOCKSWAPARGS" + ], + [ + 266, + 127, + 0, + 132, + 0, + "WANCOMPILEARGS" + ], + [ + 267, + 128, + 0, + 132, + 1, + "BLOCKSWAPARGS" + ], + [ + 269, + 133, + 0, + 134, + 0, + "CLIP" + ], + [ + 271, + 133, + 0, + 137, + 0, + "CLIP" + ], + [ + 274, + 139, + 0, + 140, + 3, + "LATENT" + ], + [ + 275, + 131, + 0, + 139, + 0, + "WANVIDEOMODEL" + ], + [ + 276, + 132, + 0, + 140, + 0, + "WANVIDEOMODEL" + ], + [ + 277, + 138, + 0, + 139, + 2, + "WANVIDEOTEXTEMBEDS" + ], + [ + 278, + 138, + 0, + 140, + 2, + "WANVIDEOTEXTEMBEDS" + ], + [ + 
284, + 143, + 0, + 144, + 0, + "INT,FLOAT,IMAGE,LATENT" + ], + [ + 285, + 143, + 1, + 144, + 1, + "INT,FLOAT,IMAGE,LATENT" + ], + [ + 286, + 147, + 0, + 144, + 2, + "INT,FLOAT,IMAGE,LATENT" + ], + [ + 287, + 143, + 0, + 145, + 0, + "INT,FLOAT,IMAGE,LATENT" + ], + [ + 288, + 143, + 1, + 145, + 1, + "INT,FLOAT,IMAGE,LATENT" + ], + [ + 289, + 147, + 0, + 145, + 2, + "INT,FLOAT,IMAGE,LATENT" + ], + [ + 292, + 148, + 0, + 143, + 0, + "IMAGE" + ], + [ + 299, + 150, + 0, + 139, + 17, + "INT" + ], + [ + 300, + 150, + 0, + 140, + 17, + "INT" + ], + [ + 301, + 151, + 0, + 139, + 18, + "INT" + ], + [ + 302, + 151, + 0, + 140, + 18, + "INT" + ], + [ + 309, + 130, + 0, + 131, + 2, + "WANVIDLORA" + ], + [ + 310, + 129, + 0, + 132, + 2, + "WANVIDLORA" + ], + [ + 323, + 157, + 0, + 156, + 0, + "WANVAE" + ], + [ + 324, + 134, + 0, + 138, + 0, + "CONDITIONING" + ], + [ + 325, + 137, + 0, + 138, + 1, + "CONDITIONING" + ], + [ + 326, + 144, + 0, + 156, + 8, + "INT" + ], + [ + 327, + 145, + 0, + 156, + 9, + "INT" + ], + [ + 328, + 156, + 0, + 139, + 1, + "WANVIDIMAGE_EMBEDS" + ], + [ + 329, + 156, + 0, + 140, + 1, + "WANVIDIMAGE_EMBEDS" + ], + [ + 330, + 148, + 0, + 156, + 2, + "IMAGE" + ], + [ + 331, + 157, + 0, + 158, + 0, + "WANVAE" + ], + [ + 332, + 140, + 0, + 158, + 1, + "LATENT" + ], + [ + 333, + 158, + 0, + 115, + 0, + "IMAGE" + ] + ], + "groups": [ + { + "id": 10, + "title": "Torch & BlockSwap Settings", + "bounding": [ + 143.13111359611818, + -1918.4501230958715, + 431.99532517428696, + 730.7324389598393 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 11, + "title": "Optional - Lightning LoRA's", + "bounding": [ + 596.4880032688702, + -1914.1644858622692, + 422.4607839441061, + 722.3897153834312 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 12, + "title": "Load Models", + "bounding": [ + 1044.2731518818039, + -2355.499116739233, + 523.8868499154621, + 1171.380796453875 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 13, + "title": "Prompt", + "bounding": [ + 1616.7925501619102, + -2480.323169759354, + 488.6849423119852, + 570.7599158643834 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 14, + "title": "Frame Interpolation & Save Video", + "bounding": [ + 3085.0841958373203, + -2350.82909154303, + 909.3600091280955, + 1101.5831873783357 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 15, + "title": "Resolution", + "bounding": [ + 1632.8812713074583, + -1875.5623412613659, + 801.3315686991712, + 687.8660132128684 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 16, + "title": "Steps", + "bounding": [ + 2178.713322185243, + -2204.5807121133494, + 239.46159293264282, + 289.3928272266164 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + }, + { + "id": 17, + "title": "Upload Image", + "bounding": [ + 148.2969064521084, + -2392.8253918058485, + 861.1247302673673, + 449.143286077629 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + } + ], + "config": {}, + "extra": { + "ds": { + "scale": 0.5730855330117446, + "offset": [ + -220.47032824069905, + 2547.4459340193275 + ] + }, + "frontendVersion": "1.28.8", + "VHS_latentpreview": false, + "VHS_latentpreviewrate": 0, + "VHS_MetadataImage": true, + "VHS_KeepIntermediate": true, + "ue_links": [], + "links_added_by_ue": [] + }, + "version": 0.4 +} \ No newline at end of file