|
17 | 17 | import numpy as np |
18 | 18 | from random import randint |
19 | 19 | from stable_args import args |
| 20 | +from datetime import datetime as dt |
| 21 | +import json |
| 22 | +import re |
| 23 | +from pathlib import Path |
20 | 24 |
|
21 | 25 | # This has to come before importing cache objects |
22 | 26 | if args.clear_all: |
@@ -250,5 +254,25 @@ def end_profiling(device): |
# Convert the generated uint8 image tensors to PIL images.
pil_images = [
    transform(image) for image in torch.from_numpy(images).to(torch.uint8)
]

# Resolve the output directory (create it if the user supplied one);
# fall back to the current working directory otherwise.
if args.output_dir is not None:
    output_path = Path(args.output_dir)
    output_path.mkdir(parents=True, exist_ok=True)
else:
    output_path = Path.cwd()

# Single timestamp for the whole batch: loop-invariant, and it guarantees
# all files from one generation run share the same time suffix.
batch_timestamp = dt.now().strftime("%y%m%d_%H%M%S")
for i in range(batch_size):
    # Generation metadata saved alongside each image so a result can be
    # reproduced later from its .json sidecar file.
    json_store = {
        "prompt": args.prompts[i],
        # NOTE(review): key uses a space, unlike "guidance_scale" —
        # kept as-is since downstream readers may depend on it; confirm
        # before normalizing to "negative_prompt".
        "negative prompt": args.negative_prompts[i],
        "seed": args.seed,
        "variant": args.variant,
        "precision": args.precision,
        "steps": args.steps,
        "guidance_scale": args.guidance_scale,
        "scheduler": args.scheduler,
    }
    # Filesystem-safe slug from the first 15 chars of the prompt.
    prompt_slice = re.sub(r"[^a-zA-Z0-9]", "_", args.prompts[i][:15])
    img_name = f"{prompt_slice}_{args.seed}_{i}_{batch_timestamp}"
    pil_images[i].save(output_path / f"{img_name}.jpg")
    with open(output_path / f"{img_name}.json", "w") as f:
        json.dump(json_store, f, indent=4)
0 commit comments