diff --git a/examples/benchmarks/compression/mcmc.sh b/examples/benchmarks/compression/mcmc.sh
index 4c7165f3d..274185842 100644
--- a/examples/benchmarks/compression/mcmc.sh
+++ b/examples/benchmarks/compression/mcmc.sh
@@ -32,13 +32,13 @@ do
     # train without eval
     CUDA_VISIBLE_DEVICES=0 python simple_trainer.py mcmc --eval_steps -1 --disable_viewer --data_factor $DATA_FACTOR \
         --strategy.cap-max $CAP_MAX \
-        --data_dir data/360_v2/$SCENE/ \
+        --data_dir $SCENE_DIR/$SCENE/ \
         --result_dir $RESULT_DIR/$SCENE/
 
     # eval: use vgg for lpips to align with other benchmarks
     CUDA_VISIBLE_DEVICES=0 python simple_trainer.py mcmc --disable_viewer --data_factor $DATA_FACTOR \
         --strategy.cap-max $CAP_MAX \
-        --data_dir data/360_v2/$SCENE/ \
+        --data_dir $SCENE_DIR/$SCENE/ \
         --result_dir $RESULT_DIR/$SCENE/ \
         --lpips_net vgg \
         --compression png \
@@ -49,7 +49,7 @@ done
 if command -v zip &> /dev/null
 then
     echo "Zipping results"
-    python benchmarks/compression/summarize_stats.py --results_dir $RESULT_DIR
+    python benchmarks/compression/summarize_stats.py --results_dir $RESULT_DIR --scenes $SCENE_LIST
 else
     echo "zip command not found, skipping zipping"
 fi
\ No newline at end of file
diff --git a/examples/benchmarks/compression/mcmc_tt.sh b/examples/benchmarks/compression/mcmc_tt.sh
index 054920929..34d9f5ea7 100644
--- a/examples/benchmarks/compression/mcmc_tt.sh
+++ b/examples/benchmarks/compression/mcmc_tt.sh
@@ -7,7 +7,7 @@ SCENE_LIST="train truck"
 # CAP_MAX=360000
 
 # # 0.49M GSs
-# RESULT_DIR="results/benchmark_tt_mcmc_tt_0_49M_png_compression"
+# RESULT_DIR="results/benchmark_tt_mcmc_0_49M_png_compression"
 # CAP_MAX=490000
 
 # 1M GSs
diff --git a/examples/benchmarks/compression/summarize_stats.py b/examples/benchmarks/compression/summarize_stats.py
index d11dbed6f..5efa729fe 100644
--- a/examples/benchmarks/compression/summarize_stats.py
+++ b/examples/benchmarks/compression/summarize_stats.py
@@ -8,9 +8,8 @@
 import tyro
 
 
-def main(results_dir: str, scenes: List[str]):
+def main(results_dir: str, scenes: List[str], stage: str = "compress"):
     print("scenes:", scenes)
-    stage = "compress"
 
     summary = defaultdict(list)
     for scene in scenes:
@@ -33,7 +32,11 @@ def main(results_dir: str, scenes: List[str]):
                 summary[k].append(v)
 
     for k, v in summary.items():
-        print(k, np.mean(v))
+        summary[k] = np.mean(v)
+    summary["scenes"] = scenes
+
+    with open(os.path.join(results_dir, f"{stage}_summary.json"), "w") as f:
+        json.dump(summary, f, indent=2)
 
 
 if __name__ == "__main__":
diff --git a/examples/datasets/colmap.py b/examples/datasets/colmap.py
index 938bad265..59effffb3 100644
--- a/examples/datasets/colmap.py
+++ b/examples/datasets/colmap.py
@@ -1,9 +1,11 @@
 import os
 import json
+from tqdm import tqdm
 from typing import Any, Dict, List, Optional
 from typing_extensions import assert_never
 
 import cv2
+from PIL import Image
 import imageio.v2 as imageio
 import numpy as np
 import torch
@@ -163,6 +165,28 @@ def __init__(
         # so we need to map between the two sorted lists of files.
         colmap_files = sorted(_get_rel_paths(colmap_image_dir))
         image_files = sorted(_get_rel_paths(image_dir))
+        if factor > 1 and os.path.splitext(image_files[0])[1].lower() == ".jpg":
+            print("Downscaling full resolution images instead of provided jpgs.")
+            image_dir = image_dir + "_png"
+            os.makedirs(image_dir, exist_ok=True)
+            image_files = [
+                os.path.splitext(image_file)[0] + ".png" for image_file in image_files
+            ]
+            for colmap_file, image_file in zip(tqdm(colmap_files), image_files):
+                resized_image_path = os.path.join(image_dir, image_file)
+                if os.path.isfile(resized_image_path):
+                    continue
+                full_image = imageio.imread(
+                    os.path.join(colmap_image_dir, colmap_file)
+                )[..., :3]
+                resized_size = (
+                    int(round(full_image.shape[1] / factor)),
+                    int(round(full_image.shape[0] / factor)),
+                )
+                resized_image = np.array(
+                    Image.fromarray(full_image).resize(resized_size, Image.BICUBIC)
+                )
+                imageio.imwrite(resized_image_path, resized_image)
         colmap_to_image = dict(zip(colmap_files, image_files))
         image_paths = [os.path.join(image_dir, colmap_to_image[f]) for f in image_names]
 
@@ -389,7 +413,6 @@ def __getitem__(self, item: int) -> Dict[str, Any]:
     import argparse
 
     import imageio.v2 as imageio
-    import tqdm
 
     parser = argparse.ArgumentParser()
     parser.add_argument("--data_dir", type=str, default="data/360_v2/garden")
@@ -404,7 +427,7 @@ def __getitem__(self, item: int) -> Dict[str, Any]:
     print(f"Dataset: {len(dataset)} images.")
 
     writer = imageio.get_writer("results/points.mp4", fps=30)
-    for data in tqdm.tqdm(dataset, desc="Plotting points"):
+    for data in tqdm(dataset, desc="Plotting points"):
         image = data["image"].numpy().astype(np.uint8)
         points = data["points"].numpy()
         depths = data["depths"].numpy()
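
Note: with the new `stage` parameter, the updated summarize_stats.py can also be driven directly from Python instead of through its tyro CLI. A minimal sketch, assuming the script is run from examples/benchmarks/compression/ and using illustrative result-dir and scene values that are not part of this patch:

# Sketch only: averages each scene's stats and writes
# <results_dir>/compress_summary.json, as added in this patch.
# The import path, results_dir, and scenes below are assumptions for illustration.
from summarize_stats import main

main(
    results_dir="results/benchmark_mcmc_1M_png_compression",  # illustrative path
    scenes=["garden", "bicycle", "stump"],  # illustrative scene list
    stage="compress",  # default value of the new parameter
)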