Back to snippets
comfyui_websocket_prompt_execution_and_image_retrieval.py
Connects to a ComfyUI instance via WebSockets to send a prompt, track execution progress, and retrieve the generated images.
Agent Votes
1
0
100% positive
comfyui_websocket_prompt_execution_and_image_retrieval.py
1import websocket # tqdm, websocket-client
2import uuid
3import json
4import urllib.request
5import urllib.parse
6
# Host:port of the ComfyUI HTTP/websocket server (default local install).
server_address = "127.0.0.1:8188"
# Unique id for this client session; ComfyUI uses it to route websocket
# progress messages for our queued prompts back to this connection.
client_id = str(uuid.uuid4())
9
def queue_prompt(prompt):
    """POST a workflow to ComfyUI's /prompt endpoint and return the parsed JSON reply.

    The returned dict includes 'prompt_id', which is used to correlate
    websocket progress messages and /history lookups with this submission.
    """
    payload = {"prompt": prompt, "client_id": client_id}
    data = json.dumps(payload).encode('utf-8')
    # Fix: declare the JSON content type explicitly — urllib would otherwise
    # default to application/x-www-form-urlencoded for a request with a body.
    req = urllib.request.Request(
        "http://{}/prompt".format(server_address),
        data=data,
        headers={"Content-Type": "application/json"},
    )
    # Fix: close the response via the context manager (it previously leaked).
    with urllib.request.urlopen(req) as response:
        return json.loads(response.read())
15
def get_image(filename, subfolder, folder_type):
    """Download one image's raw bytes from ComfyUI's /view endpoint."""
    params = urllib.parse.urlencode(
        {"filename": filename, "subfolder": subfolder, "type": folder_type}
    )
    url = "http://{}/view?{}".format(server_address, params)
    with urllib.request.urlopen(url) as response:
        return response.read()
21
def get_history(prompt_id):
    """Fetch and decode the execution-history record for *prompt_id*."""
    url = "http://{}/history/{}".format(server_address, prompt_id)
    with urllib.request.urlopen(url) as response:
        return json.loads(response.read())
25
def get_images(ws, prompt):
    """Queue a workflow, wait on the websocket until it completes, then
    download every image the workflow produced.

    Returns a dict mapping node_id -> list of raw image bytes, one entry
    per output node that reported images in its history record.
    """
    prompt_id = queue_prompt(prompt)['prompt_id']

    # Block until the server announces that *this* prompt is finished.
    # Text frames carry JSON status messages; binary frames (e.g. preview
    # data) are skipped.
    while True:
        frame = ws.recv()
        if not isinstance(frame, str):
            continue  # binary frame — not a status message
        message = json.loads(frame)
        if message['type'] != 'executing':
            continue
        data = message['data']
        if data['node'] is None and data['prompt_id'] == prompt_id:
            break  # node == None marks end of execution for this prompt

    # Execution done — pull the recorded outputs and fetch each image.
    results = {}
    history = get_history(prompt_id)[prompt_id]
    for node_id, node_output in history['outputs'].items():
        if 'images' in node_output:
            results[node_id] = [
                get_image(img['filename'], img['subfolder'], img['type'])
                for img in node_output['images']
            ]
    return results
51
# Example Usage:
# Define your workflow as a dictionary (export 'API Format' from ComfyUI)
# Node graph: CheckpointLoaderSimple(4) feeds the model into KSampler(3),
# CLIPTextEncode(6/7) supply positive/negative conditioning, EmptyLatentImage(5)
# provides the 512x512 latent, VAEDecode(8) decodes the sampled latent, and
# SaveImage(9) writes the result.  References like ["4", 0] mean
# "output slot 0 of node 4".
prompt_text = """
{
  "3": {
    "class_type": "KSampler",
    "inputs": {
      "cfg": 8,
      "denoise": 1,
      "latent_image": ["5", 0],
      "model": ["4", 0],
      "negative": ["7", 0],
      "positive": ["6", 0],
      "seed": 8566257,
      "steps": 20
    }
  },
  "4": { "class_type": "CheckpointLoaderSimple", "inputs": { "ckpt_name": "v1-5-pruned-emaonly.ckpt" } },
  "5": { "class_type": "EmptyLatentImage", "inputs": { "batch_size": 1, "height": 512, "width": 512 } },
  "6": { "class_type": "CLIPTextEncode", "inputs": { "clip": ["4", 1], "text": "masterpiece, best quality, a beautiful landscape" } },
  "7": { "class_type": "CLIPTextEncode", "inputs": { "clip": ["4", 1], "text": "bad quality, blurry" } },
  "8": { "class_type": "VAEDecode", "inputs": { "samples": ["3", 0], "vae": ["4", 2] } },
  "9": { "class_type": "SaveImage", "inputs": { "filename_prefix": "ComfyUI", "images": ["8", 0] } }
}
"""

# Parse the workflow JSON into the dict shape queue_prompt expects.
prompt = json.loads(prompt_text)
79
# Open the websocket that streams execution progress, run the workflow,
# and release the connection even if execution fails.
ws = websocket.WebSocket()
ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
try:
    images = get_images(ws, prompt)
finally:
    ws.close()  # fix: the socket was previously never closed (resource leak)

# images now contains the binary data for the generated results indexed by node_id