import json
from urllib import request, parse
import random
import sys
import ast
import base64
import uuid

# This is the ComfyUI API prompt format.
# If you want it for a specific workflow, you can "enable dev mode options"
# in the settings of the UI (gear beside "Queue Size:"); this enables a
# button in the UI that saves workflows in API format.
# Keep in mind ComfyUI is pre-alpha software, so this format may still change a bit.
# Here the prompt is loaded from a workflow JSON file that was exported that way.
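#
# For reference, an API-format workflow is a dict keyed by node id, where each
# node carries its inputs plus a "_meta" title. This is an illustrative sketch,
# not taken from the actual workflow file used below:
#
#   {
#       "3": {
#           "class_type": "KSampler",
#           "inputs": {"steps": 20, "seed": 0, ...},
#           "_meta": {"title": "KSampler"}
#       },
#       ...
#   }
#
# find_node() and set_filename() below walk this structure by "_meta" title.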


def convert_base64_string_to_object(base64_string):
    # Decode a base64-encoded JSON string (as passed on the command line) into a dict.
    decoded_bytes = base64.b64decode(base64_string)
    decoded_string = decoded_bytes.decode("ascii")
    return json.loads(decoded_string)


server_address = "127.0.0.1:8188"
client_id = str(uuid.uuid4())

# Read the API-format workflow exported from the ComfyUI editor.
with open(
    "D://Git//ap-canvas-creation-module//04_stable_diffusion//workflows//canvas_3d_to_img_standard_V1.json",
    "r",
) as f:
    prompt_text_json = f.read()


def queue_prompt(prompt):
    # POST the workflow to the ComfyUI /prompt endpoint to queue it for execution.
    # client_id is optional; it lets the server associate results with this caller.
    p = {"prompt": prompt, "client_id": client_id}
    data = json.dumps(p).encode("utf-8")
    req = request.Request(f"http://{server_address}/prompt", data=data)
    request.urlopen(req)
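

# Note: the body posted above looks roughly like
# {"prompt": {...API-format workflow...}, "client_id": "<uuid>"}; on success
# ComfyUI replies with JSON that includes a "prompt_id" for the queued job
# (unused here, but useful if you want to poll /history/<prompt_id> later).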


def load_debug_ai_scene_info():
    # Debug fallback: open ai_scene_info.json from disk when no argument is passed.
    with open("D:/Git/ap-canvas-creation-module/04_stable_diffusion/ai_scene_info.json", "r") as f:
        ai_scene_info = json.load(f)
    return ai_scene_info


def find_node(json_obj, title):
    # Recursively search the workflow dict for the node whose "_meta" title matches.
    for key, value in json_obj.items():
        if isinstance(value, dict):
            if value.get("_meta", {}).get("title") == title:
                return value
            else:
                result = find_node(value, title)
                if result:
                    return result
    return None


def set_filename(json_obj, title, new_prefix):
    # Find the node with the given "_meta" title and overwrite its filename_prefix input.
    for key, value in json_obj.items():
        if isinstance(value, dict):
            if value.get("_meta", {}).get("title") == title:
                if "inputs" in value and "filename_prefix" in value["inputs"]:
                    value["inputs"]["filename_prefix"] = new_prefix
                return value
            else:
                result = set_filename(value, title, new_prefix)
                if result:
                    return result
    return None


def main():
    argv = sys.argv
    try:
        # When launched by another process, the scene info arrives as a
        # base64-encoded JSON string after the "--" separator on the command line.
        argv = argv[argv.index("--") + 1 :]
        ai_scene_info = convert_base64_string_to_object(argv[0])
        print("loading scene data", ai_scene_info)
    except Exception as e:
        print("Error:", e)
        ai_scene_info = load_debug_ai_scene_info()

    positive_text = ai_scene_info["ai_scene"]["settings"]["positive_prompt"]
    negative_text = ai_scene_info["ai_scene"]["settings"]["negative_prompt"]

    image_path = "D://Git//ap-canvas-creation-module//03_blender//sd_blender//sample_scene//Renders//15a314a1-8ba1-4e0e-ad0c-f605b06f89f8//"
    image_base_path = image_path + "base0001.jpg"
    image_alpha_products_path = image_path + "alpha_products0001.jpg"
    # image_depth_path = image_path + "depth0001.png"

    prompt = json.loads(prompt_text_json)

    # Redirect the "Save Image" node's output prefix.
    set_filename(prompt, "Save Image", "custom/basic_api_example")

    # Randomize the seed and set the step count on the main sampler.
    ksampler_main = find_node(prompt, "KSampler")
    ksampler_main["inputs"]["noise_seed"] = random.randint(0, 1000000)
    ksampler_main["inputs"]["steps"] = 30

    # Feed the positive/negative prompts into both SDXL text encoder inputs.
    prompt_positive = find_node(prompt, "positive_CLIPTextEncodeSDXL")
    prompt_positive["inputs"]["text_g"] = positive_text
    prompt_positive["inputs"]["text_l"] = positive_text

    prompt_negative = find_node(prompt, "negative_CLIPTextEncodeSDXL")
    prompt_negative["inputs"]["text_g"] = negative_text
    prompt_negative["inputs"]["text_l"] = negative_text

    # Point the image-loading nodes at the rendered passes.
    image_base = find_node(prompt, "image_base")
    image_base["inputs"]["image"] = image_base_path

    image_product_mask = find_node(prompt, "image_product_mask")
    image_product_mask["inputs"]["image"] = image_alpha_products_path

    image_depth = find_node(prompt, "image_depth")
    # image_depth["inputs"]["image"] = image_depth_path

    queue_prompt(prompt)


if __name__ == "__main__":
    main()
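
# Illustrative only: the base64 argument this script expects can be built like this
# by whatever process launches it (the calling side shown here is an assumption):
#
#   payload = {"ai_scene": {"settings": {"positive_prompt": "a red vase on a table",
#                                        "negative_prompt": "blurry, low quality"}}}
#   arg = base64.b64encode(json.dumps(payload).encode("ascii")).decode("ascii")
#   # then launch:  <host process> ... -- <arg>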