diff --git a/chatsim/agents/background_rendering_agent.py b/chatsim/agents/background_rendering_agent.py index 78051ed..331161b 100644 --- a/chatsim/agents/background_rendering_agent.py +++ b/chatsim/agents/background_rendering_agent.py @@ -43,6 +43,10 @@ def func_render_background(self, scene): poses_render = scene.current_extrinsics[:, :3, :] np.save(os.path.join(self.nerf_data_dir, 'poses_render.npy'), poses_render) + # remove previous rendered images + if os.path.exists(self.nerf_novel_view_dir) and len(os.listdir(self.nerf_novel_view_dir)) > 0: + os.system(f"rm -r {self.nerf_novel_view_dir}/*") + current_dir = os.getcwd() os.chdir(self.f2nerf_dir) # do not generate intermediate file at root directory render_command = f'python scripts/run.py \ diff --git a/chatsim/agents/utils.py b/chatsim/agents/utils.py index 821af03..16c94db 100644 --- a/chatsim/agents/utils.py +++ b/chatsim/agents/utils.py @@ -10,6 +10,7 @@ import imageio.v2 as imageio import collections from termcolor import colored +from tqdm import tqdm class Struct: def __init__(self, **entries): @@ -33,15 +34,10 @@ def generate_video(scene, prompt): filename = prompt.replace(' ', '_')[:40] fps = scene.fps print(colored("[Compositing video]", 'blue', attrs=['bold']), "start...") - # save to gif - imageio.mimsave(os.path.join(video_output_path, f"{filename}.gif"), - scene.final_video_frames, - fps=fps - ) - # save to mp4 + writer = imageio.get_writer(os.path.join(video_output_path, f"{filename}.mp4"), fps=fps) - for frame in scene.final_video_frames: + for frame in tqdm(scene.final_video_frames): writer.append_data(frame) writer.close() # save frames to folder diff --git a/chatsim/background/inpainting/latent-diffusion/ldm/models/diffusion/ddpm.py b/chatsim/background/inpainting/latent-diffusion/ldm/models/diffusion/ddpm.py index ef0990e..e4c6cb1 100644 --- a/chatsim/background/inpainting/latent-diffusion/ldm/models/diffusion/ddpm.py +++ 
b/chatsim/background/inpainting/latent-diffusion/ldm/models/diffusion/ddpm.py @@ -16,7 +16,10 @@ from functools import partial from tqdm import tqdm from torchvision.utils import make_grid -from pytorch_lightning.utilities.rank_zero import rank_zero_only +try: + from pytorch_lightning.utilities.distributed import rank_zero_only +except ImportError: + from pytorch_lightning.utilities.rank_zero import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma diff --git a/chatsim/background/inpainting/latent-diffusion/main.py b/chatsim/background/inpainting/latent-diffusion/main.py index e8e18c1..99f120e 100644 --- a/chatsim/background/inpainting/latent-diffusion/main.py +++ b/chatsim/background/inpainting/latent-diffusion/main.py @@ -14,7 +14,10 @@ from pytorch_lightning import seed_everything from pytorch_lightning.trainer import Trainer from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor -from pytorch_lightning.utilities.distributed import rank_zero_only +try: # previous requirements.txt + from pytorch_lightning.utilities.distributed import rank_zero_only +except ImportError: # new requirements.txt + from pytorch_lightning.utilities.rank_zero import rank_zero_only from pytorch_lightning.utilities import rank_zero_info from ldm.data.base import Txt2ImgIterableBaseDataset