@@ -77,6 +77,12 @@ def resize_and_center_crop(image, size):
7777 image = image .resize ((int (fac * image .size [0 ]), int (fac * image .size [1 ])), Image .LANCZOS )
7878 return TF .center_crop (image , size [::- 1 ])
7979
80+ def callback_fn(info):
81+     if info['i'] % 50 == 0 or info['i'] == args.steps:
82+         out = info['pred'].add(1).div(2)
83+         save_image(out, f"interm_output_{info['i']:05d}.png")
84+         if IS_NOTEBOOK:
85+             display.display(display.Image(f"interm_output_{info['i']:05d}.png", height=300))
8086
8187def main ():
8288 p = argparse .ArgumentParser (description = __doc__ ,
@@ -176,13 +182,6 @@ def main():
176182
177183 torch .manual_seed (args .seed )
178184
179- def callback_fn (pred , i ):
180- if i % 50 == 0 or i == args .steps :
181- out = pred .add (1 ).div (2 )
182- save_image (out , f"interm_output_{ i :05d} .png" )
183- if IS_NOTEBOOK :
184- display .display (display .Image (f"interm_output_{ i :05d} .png" ,height = 300 ))
185-
186185 def cond_fn (x , t , pred , clip_embed ):
187186 clip_in = normalize (make_cutouts ((pred + 1 ) / 2 ))
188187 image_embeds = clip_model .encode_image (clip_in ).view ([args .cutn , x .shape [0 ], - 1 ])
@@ -295,13 +294,6 @@ def run_diffusion(prompts,images=None,steps=1000,init=None,model="yfcc_2",size=[
295294
296295 torch .manual_seed (args .seed )
297296
298- def callback_fn (pred , i ):
299- if i % display_freq == 0 or i == args .steps :
300- out = pred .add (1 ).div (2 )
301- save_image (out , f"interm_output_{ i :05d} .png" )
302- if IS_NOTEBOOK :
303- display .display (display .Image (f"interm_output_{ i :05d} .png" ,height = 300 ))
304-
305297 def cond_fn (x , t , pred , clip_embed ):
306298 clip_in = normalize (make_cutouts ((pred + 1 ) / 2 ))
307299 image_embeds = clip_model .encode_image (clip_in ).view ([args .cutn , x .shape [0 ], - 1 ])
0 commit comments