{ "last_node_id": 12, "last_link_id": 10, "nodes": [ { "id": 5, "type": "VAELoader", "pos": [ 455.3836975097656, 522.6910400390625 ], "size": [ 315, 58 ], "flags": {}, "order": 0, "mode": 0, "inputs": [], "outputs": [ { "name": "VAE", "type": "VAE", "links": [ 3 ], "slot_index": 0 } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.15", "Node name for S&R": "VAELoader" }, "widgets_values": [ "ae.safetensors" ] }, { "id": 8, "type": "CLIPLoader", "pos": [ 450.87408447265625, 665.6912231445312 ], "size": [ 315, 82 ], "flags": {}, "order": 1, "mode": 0, "inputs": [], "outputs": [ { "name": "CLIP", "type": "CLIP", "links": [ 4, 5 ], "slot_index": 0 } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.15", "Node name for S&R": "CLIPLoader" }, "widgets_values": [ "gemma_2_2b_fp16.safetensors", "lumina2", "default" ] }, { "id": 9, "type": "EmptySD3LatentImage", "pos": [ 448.90447998046875, 814.5116577148438 ], "size": [ 315, 106 ], "flags": {}, "order": 2, "mode": 0, "inputs": [], "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 6 ], "slot_index": 0 } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.15", "Node name for S&R": "EmptySD3LatentImage" }, "widgets_values": [ 1024, 1552, 1 ] }, { "id": 1, "type": "UNETLoader", "pos": [ 247.75680541992188, 162.58412170410156 ], "size": [ 708.6932983398438, 90.14027404785156 ], "flags": {}, "order": 3, "mode": 0, "inputs": [], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 1 ], "slot_index": 0 } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.15", "Node name for S&R": "UNETLoader" }, "widgets_values": [ "lu2/results_cosine_2e-4_bs64_infallssssuum/checkpoint-e4_s68303/consolidated.00-of-01.pth", "default" ] }, { "id": 2, "type": "ModelSamplingAuraFlow", "pos": [ 989.8890380859375, 174.9385986328125 ], "size": [ 315, 58 ], "flags": {}, "order": 7, "mode": 0, "inputs": [ { "name": "model", "type": "MODEL", "link": 1 } ], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 2 ], "slot_index": 0 } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.15", "Node name for S&R": "ModelSamplingAuraFlow" }, "widgets_values": [ 6 ] }, { "id": 3, "type": "KSampler", "pos": [ 1331.6990966796875, 173.83229064941406 ], "size": [ 315, 262 ], "flags": {}, "order": 8, "mode": 0, "inputs": [ { "name": "model", "type": "MODEL", "link": 2 }, { "name": "positive", "type": "CONDITIONING", "link": 7 }, { "name": "negative", "type": "CONDITIONING", "link": 8 }, { "name": "latent_image", "type": "LATENT", "link": 6 } ], "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 9 ], "slot_index": 0 } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.15", "Node name for S&R": "KSampler" }, "widgets_values": [ 1066916133633552, "randomize", 30, 5.5, "res_multistep", "linear_quadratic", 1 ] }, { "id": 4, "type": "VAEDecode", "pos": [ 1677.12353515625, 177.60211181640625 ], "size": [ 210, 46 ], "flags": {}, "order": 9, "mode": 0, "inputs": [ { "name": "samples", "type": "LATENT", "link": 9 }, { "name": "vae", "type": "VAE", "link": 3 } ], "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 10 ], "slot_index": 0 } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.15", "Node name for S&R": "VAEDecode" }, "widgets_values": [] }, { "id": 11, "type": "PreviewImage", "pos": [ 1926.0179443359375, 205.5092315673828 ], "size": [ 578.5769653320312, 531.48828125 ], "flags": {}, "order": 10, "mode": 0, "inputs": [ { "name": "images", "type": "IMAGE", "link": 10 } ], "outputs": [], "properties": { "cnr_id": "comfy-core", 
"ver": "0.3.15", "Node name for S&R": "PreviewImage" }, "widgets_values": [] }, { "id": 7, "type": "CLIPTextEncode", "pos": [ 891.7837524414062, 902.3532104492188 ], "size": [ 425.27801513671875, 180.6060791015625 ], "flags": {}, "order": 6, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 5 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 8 ], "slot_index": 0 } ], "title": "CLIP Text Encode (Negative Prompt)", "properties": { "cnr_id": "comfy-core", "ver": "0.3.15", "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "You are an assistant designed to generate images based on textual prompts. ai generated image.blurry, worst quality, low quality" ], "color": "#322", "bgcolor": "#533" }, { "id": 6, "type": "CLIPTextEncode", "pos": [ 890.725341796875, 523.7492065429688 ], "size": [ 416.1895446777344, 330.5545654296875 ], "flags": {}, "order": 5, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 4 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 7 ], "slot_index": 0 } ], "title": "CLIP Text Encode (Positive Prompt)", "properties": { "cnr_id": "comfy-core", "ver": "0.3.15", "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "You are an assistant designed to generate anime images based on textual prompts. \n1girl,\nmasterpiece, best quality\n" ], "color": "#232", "bgcolor": "#353" }, { "id": 12, "type": "Note", "pos": [ 71.90563201904297, 316.9867248535156 ], "size": [ 319.26513671875, 197.89625549316406 ], "flags": {}, "order": 4, "mode": 0, "inputs": [], "outputs": [], "properties": {}, "widgets_values": [ "The \"You are an assistant... \" text before the actual prompt is the one used in the official example.\n\nThe reason it is exposed to the user like this is because the model still works if you modify or remove it." ], "color": "#432", "bgcolor": "#653" } ], "links": [ [ 1, 1, 0, 2, 0, "MODEL" ], [ 2, 2, 0, 3, 0, "MODEL" ], [ 3, 5, 0, 4, 1, "VAE" ], [ 4, 8, 0, 6, 0, "CLIP" ], [ 5, 8, 0, 7, 0, "CLIP" ], [ 6, 9, 0, 3, 3, "LATENT" ], [ 7, 6, 0, 3, 1, "CONDITIONING" ], [ 8, 7, 0, 3, 2, "CONDITIONING" ], [ 9, 3, 0, 4, 0, "LATENT" ], [ 10, 4, 0, 11, 0, "IMAGE" ] ], "groups": [], "config": {}, "extra": { "ds": { "scale": 0.8390545288824954, "offset": [ -36.568109874487064, -77.00281566409807 ] }, "VHS_latentpreview": false, "VHS_latentpreviewrate": 0, "VHS_MetadataImage": true, "VHS_KeepIntermediate": true }, "version": 0.4 }