import os
import random
import requests
import fal_client

# TODO
# - Select the pose from a list of poses
# - Support LoRAs directly
# - Get a description of the outfit

def import_lora(lora_name):
    # TODO: placeholder, should download/register the LoRA named `lora_name`.
    print("we are importing a lora")
    return "Tetsok"

def return_pose(pose_id: int):
    # TODO: placeholder, should map `pose_id` to a pose reference image.
    print("we are returning a pose")
    return "Tetsok"
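
# Minimal sketch (assumption) of what return_pose might eventually do: map a
# pose_id to a pose reference image filename. Only the first entry below is a
# filename that actually appears in this script; the table itself is hypothetical.
EXAMPLE_POSES = {
    0: "Pose_Female_Front_full_standing_02.webp_00001_.png",
}

def example_return_pose(pose_id: int) -> str:
    # Fall back to the default standing pose for unknown ids.
    return EXAMPLE_POSES.get(pose_id, EXAMPLE_POSES[0])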




def run_workflow(body):
    # Queue a run of the hosted ComfyUI workflow on ComfyICU.
    print("run workflow")
    url = f"https://comfy.icu/api/v1/workflows/{body['workflow_id']}/runs"
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "authorization": "Bearer " + os.environ['COMFYICU_API_KEY']
    }

    response = requests.post(url, headers=headers, json=body)
    response.raise_for_status()
    return response.json()
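
# Hedged sketch (assumption): the POST above only queues the run, so callers
# typically poll for completion. This assumes ComfyICU exposes
# GET /api/v1/workflows/{workflow_id}/runs/{run_id} returning a "status" field;
# confirm against the ComfyICU API docs before relying on it.
def wait_for_run(workflow_id, run_id, poll_seconds=5):
    import time  # local import to keep the sketch self-contained
    url = f"https://comfy.icu/api/v1/workflows/{workflow_id}/runs/{run_id}"
    headers = {"authorization": "Bearer " + os.environ['COMFYICU_API_KEY']}
    while True:
        run = requests.get(url, headers=headers).json()
        if run.get("status") in ("COMPLETED", "ERROR"):
            return run
        time.sleep(poll_seconds)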

def comfy_create_image(character_lora, character_keyword, outfit_desc, pose_id, num_outputs):
    # Generate images through the ComfyICU-hosted workflow (Flux + pose ControlNet).
    seed = random.randint(0, 1000000)
    print(f"seed: {seed}")
    # Workflow graph in ComfyUI API format: node 199 holds the positive prompt,
    # node 308 the KSampler seed, node 344 the latent size / batch count,
    # node 365 the character LoRA, and node 369 the pose reference image.
    prompt = {"56": {"_meta": {"title": "CLIP Text Encode (Prompt)"}, "inputs": {"clip": ["365", 1], "text": ""}, "class_type": "CLIPTextEncode"}, "159": {"_meta": {"title": "Load VAE"}, "inputs": {"vae_name": "flux1-ae.safetensors"}, "class_type": "VAELoader"}, "175": {"_meta": {"title": "Apply ControlNet"}, "inputs": {"vae": ["159", 0], "image": ["369", 0], "negative": ["56", 0], "positive": ["199", 0], "strength": 0.7000000000000001, "control_net": ["260", 0], "end_percent": 0.5, "start_percent": 0}, "class_type": "ControlNetApplyAdvanced"}, "199": {"_meta": {"title": "CLIP Text Encode (Prompt)"}, "inputs": {"clip": ["365", 1], "text": f"Fashion model {character_keyword} wearing {outfit_desc}. posing in front of white background"}, "class_type": "CLIPTextEncode"}, "260": {"_meta": {"title": "Load ControlNet Model"}, "inputs": {"control_net_name": "flux.1-dev-controlnet-union.safetensors"}, "class_type": "ControlNetLoader"}, "263": {"_meta": {"title": "Save Image"}, "inputs": {"images": ["311", 0], "filename_prefix": "ControlNet"}, "class_type": "SaveImage"}, "307": {"_meta": {"title": "FluxGuidance"}, "inputs": {"guidance": 3.5, "conditioning": ["175", 0]}, "class_type": "FluxGuidance"}, "308": {"_meta": {"title": "KSampler"}, "inputs": {"cfg": 1, "seed": seed, "model": ["365", 0], "steps": 20, "denoise": 1, "negative": ["335", 0], "positive": ["307", 0], "scheduler": "simple", "latent_image": ["344", 0], "sampler_name": "euler"}, "class_type": "KSampler"}, "310": {"_meta": {"title": "DualCLIPLoader"}, "inputs": {"type": "flux", "device": "default", "clip_name1": "t5xxl_fp8_e4m3fn.safetensors", "clip_name2": "clip_l.safetensors"}, "class_type": "DualCLIPLoader"}, "311": {"_meta": {"title": "VAE Decode"}, "inputs": {"vae": ["159", 0], "samples": ["308", 0]}, "class_type": "VAEDecode"}, "335": {"_meta": {"title": "FluxGuidance"}, "inputs": {"guidance": 3.5, "conditioning": ["175", 1]}, "class_type": "FluxGuidance"}, "344": {"_meta": {"title": "Empty Latent Image"}, "inputs": {"width": 544, "height": 960, "batch_size": num_outputs}, "class_type": "EmptyLatentImage"}, "363": {"_meta": {"title": "Load Diffusion Model"}, "inputs": {"unet_name": "flux1-dev-fp8-e4m3fn.safetensors", "weight_dtype": "fp8_e4m3fn"}, "class_type": "UNETLoader"}, "365": {"_meta": {"title": "Load LoRA"}, "inputs": {"clip": ["310", 0], "model": ["363", 0], "lora_name": character_lora, "strength_clip": 0.99, "strength_model": 0.84}, "class_type": "LoraLoader"}, "369": {"_meta": {"title": "Load Image"}, "inputs": {"image": "Pose_Female_Front_full_standing_02.webp_00001_.png", "upload": "image"}, "class_type": "LoadImage"}}

    # Map workflow input paths to downloadable files (pose image + LoRA weights).
    files = {
        "/input/Pose_Female_Front_full_standing_02.webp_00001_.png": "https://comfy.icu/api/v1/view/workflows/SqG44yXRdRzxGQmfWwlSt/input/Pose_Female_Front_full_standing_02.webp_00001_.png",
        "/models/loras/7Jd1cwsai241yWWSPDW_k_pytorch_lora_weights.safetensors": "https://v3.fal.media/files/lion/7Jd1cwsai241yWWSPDW_k_pytorch_lora_weights.safetensors",
        "/models/loras/N5sJtK8XVftjPlIj3idOB_pytorch_lora_weights.safetensors": "https://v3.fal.media/files/monkey/N5sJtK8XVftjPlIj3idOB_pytorch_lora_weights.safetensors",
        "/models/loras/xVhN3ierb8IFqGRNOQpBT_pytorch_lora_weights.safetensors": "https://v3.fal.media/files/tiger/xVhN3ierb8IFqGRNOQpBT_pytorch_lora_weights.safetensors",
        "/models/loras/yOA2a06KWEMR-ewq3j8io_pytorch_lora_weights.safetensors": "https://v3.fal.media/files/rabbit/yOA2a06KWEMR-ewq3j8io_pytorch_lora_weights.safetensors",
    }

    workflow_id = "SqG44yXRdRzxGQmfWwlSt"

    run = run_workflow({"workflow_id": workflow_id, "prompt": prompt, "files": files})
    print(run)
    return run

def on_queue_update(update):
    # Stream fal.ai queue logs while the request is in progress.
    if isinstance(update, fal_client.InProgress):
        for log in update.logs:
            print(log["message"])

def create_image(character_lora, character_keyword, outfit_desc, pose_id, num_outputs):
    # Generate images via fal.ai's flux-general endpoint with a character LoRA
    # and the InstantX FLUX.1-dev ControlNet Union for pose control.
    seed = random.randint(0, 1000000)
    print(f"seed: {seed}")
    prompt = f"Fashion model {character_keyword} wearing {outfit_desc}. posing in front of white background"

    result = fal_client.subscribe(
        "fal-ai/flux-general",
        arguments={
            "seed": seed,
            "loras": [{
                "path": character_lora,
                "scale": "1"
            }],
            "prompt": prompt,
            "image_size": "portrait_16_9",
            "num_images": num_outputs,
            "controlnets": [],
            "guidance_scale": 3.5,
            "controlnet_unions": [{
                "path": "InstantX/FLUX.1-dev-Controlnet-Union",
                "variant": None,
                "controls": [{
                    "control_mode": "pose",
                    "control_image_url": "blob:https://fal.ai/2fdf0ff3-73e0-445c-b521-51b3dcf7f2ff",
                    "end_percentage": 0.8
                }]
            }],
            "num_inference_steps": 28,
            "enable_safety_checker": True,
            "control_loras": [],
            "use_beta_schedule": True
        },
        with_logs=True,
        on_queue_update=on_queue_update,
    )
    print(result)
    return result
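
# Hedged usage sketch (assumption): fal's flux endpoints normally return a dict
# with an "images" list of {"url": ...} entries. The key names and the .png
# extension below come from fal's documented output schema, not from this
# script, so verify them before use.
def save_fal_images(result, prefix="fal_output"):
    paths = []
    for i, image in enumerate(result.get("images", [])):
        data = requests.get(image["url"]).content
        path = f"{prefix}_{i}.png"
        with open(path, "wb") as f:
            f.write(data)
        paths.append(path)
    return paths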





if __name__ == "__main__":
    # NOTE: create_image (fal.ai) most likely needs a full LoRA URL (like the
    # v3.fal.media URLs in comfy_create_image's files dict), while
    # comfy_create_image expects the bare filename below.
    character_lora = "yOA2a06KWEMR-ewq3j8io_pytorch_lora_weights.safetensors"
    character_keyword = "crxter_scandi"
    outfit_desc = "blue strap top and pink skirt"
    pose_id = 0
    num_outputs = 1  # number of images per run (assumed default)

    create_image(character_lora, character_keyword, outfit_desc, pose_id, num_outputs)