diff --git a/CLAUDE.md b/CLAUDE.md
index e9b9589..06c9b91 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -11,7 +11,7 @@ ComfyUI-based image-to-video generation service deployed on RunPod Serverless. A
 ```
 RunPod API Request → handler.py → ComfyUI Server (port 8188) → GPU Inference → Response
                           ↓
-              Network Volume (/userdata) for models
+              Network Volume (/runpod-volume) for models
 ```
 
 **Key flow in handler.py:**
@@ -38,7 +38,7 @@ CI/CD via Gitea Actions triggers on push to `main` branch.
 
 ```bash
 docker run --gpus all -p 8188:8188 \
-  -v /path/to/models:/userdata/models \
+  -v /path/to/models:/runpod-volume/models \
   comfyui-runpod:latest
 ```
 
@@ -77,7 +77,7 @@ docker run --gpus all -p 8188:8188 \
 
 ## Key Considerations
 
-- Models stored on RunPod Network Volume at `/userdata/models/`
+- Models stored on RunPod Network Volume at `/runpod-volume/models/`
 - Cold start ~30-60s for ComfyUI initialization
 - Large outputs (>10MB) returned as file paths, not base64
 - Workflow files in `workflows/` directory (API format)
diff --git a/Dockerfile b/Dockerfile
index 3b74af4..e8d14de 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -104,26 +104,27 @@ RUN (cd ComfyUI-KJNodes && pip install -r requirements.txt || true) && \
     rm -rf /root/.cache/pip /tmp/*
 
 # Create directories and symlinks to network volume
+# RunPod mounts network volumes at /runpod-volume
 WORKDIR /workspace/ComfyUI
-RUN mkdir -p /userdata/models/checkpoints \
-    /userdata/models/loras \
-    /userdata/models/vae \
-    /userdata/models/controlnet \
-    /userdata/models/clip \
-    /userdata/models/upscale_models \
-    /userdata/.cache/huggingface \
+RUN mkdir -p /runpod-volume/models/checkpoints \
+    /runpod-volume/models/loras \
+    /runpod-volume/models/vae \
+    /runpod-volume/models/controlnet \
+    /runpod-volume/models/clip \
+    /runpod-volume/models/upscale_models \
+    /runpod-volume/.cache/huggingface \
     /workspace/.cache
 
-# Symlink model directories to /userdata
-RUN rm -rf models/checkpoints && ln -s /userdata/models/checkpoints models/checkpoints && \
-    rm -rf models/loras && ln -s /userdata/models/loras models/loras && \
-    rm -rf models/vae && ln -s /userdata/models/vae models/vae && \
-    rm -rf models/controlnet && ln -s /userdata/models/controlnet models/controlnet && \
-    rm -rf models/clip && ln -s /userdata/models/clip models/clip && \
-    rm -rf models/upscale_models && ln -s /userdata/models/upscale_models models/upscale_models
+# Symlink model directories to /runpod-volume
+RUN rm -rf models/checkpoints && ln -s /runpod-volume/models/checkpoints models/checkpoints && \
+    rm -rf models/loras && ln -s /runpod-volume/models/loras models/loras && \
+    rm -rf models/vae && ln -s /runpod-volume/models/vae models/vae && \
+    rm -rf models/controlnet && ln -s /runpod-volume/models/controlnet models/controlnet && \
+    rm -rf models/clip && ln -s /runpod-volume/models/clip models/clip && \
+    rm -rf models/upscale_models && ln -s /runpod-volume/models/upscale_models models/upscale_models
 
 # Symlink HuggingFace cache
-RUN ln -s /userdata/.cache/huggingface /workspace/.cache/huggingface
+RUN ln -s /runpod-volume/.cache/huggingface /workspace/.cache/huggingface
 
 # Copy handler and workflows
 WORKDIR /workspace
diff --git a/handler.py b/handler.py
index f427f24..90d24e4 100644
--- a/handler.py
+++ b/handler.py
@@ -454,7 +454,7 @@ def handler(job: dict) -> dict:
         # Check size for video files
         if output_info["type"] == "video" and len(data) > 10 * 1024 * 1024:
             # For large videos, save to network volume and return path
-            output_path = Path("/userdata/outputs") / output_info["filename"]
+            output_path = Path("/runpod-volume/outputs") / output_info["filename"]
             output_path.parent.mkdir(parents=True, exist_ok=True)
             output_path.write_bytes(data)
             results.append({
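Quick verification of the path migration: a minimal sketch against a locally built image, confirming the model symlinks resolve to `/runpod-volume` and no stale `/userdata` references remain. It assumes the `comfyui-runpod:latest` tag from CLAUDE.md, that `bash` is available in the image, and that `handler.py` and `workflows/` are copied under `/workspace` as the Dockerfile's final steps suggest; adjust to match your build.

```bash
docker run --rm --entrypoint bash comfyui-runpod:latest -c '
  cd /workspace/ComfyUI/models
  # Each symlink should resolve to a /runpod-volume/models/... target
  for d in checkpoints loras vae controlnet clip upscale_models; do
    printf "%-16s -> %s\n" "$d" "$(readlink -f "$d")"
  done
  # HuggingFace cache symlink should also point at the network volume
  readlink -f /workspace/.cache/huggingface
  # Flag any leftover references to the old mount point (paths are assumptions)
  grep -Rns "/userdata" /workspace/handler.py /workspace/workflows || echo "no /userdata references found"
'
```

Without a network volume attached, the symlink targets still exist as empty directories baked into the image by `mkdir -p`; on RunPod Serverless the same paths are backed by the persistent volume mounted at `/runpod-volume`.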