Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fixed configuration #13

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 6 additions & 7 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ Learn more on our project page: [iHuman](https://pramishp.github.io/iHuman/index

## Prerequisites

* Cuda 11.8
* Cuda 11.8/12.1
* Conda
* A C++14 capable compiler
* __Linux:__ GCC/G++ 8 or higher
Expand All @@ -15,30 +15,29 @@ Learn more on our project page: [iHuman](https://pramishp.github.io/iHuman/index
First make sure all the Prerequisites are installed in your operating system. Then, invoke

```bash
conda env create -f environment.yml
conda env create -f environment-cuda12_1.yml # or environment-cuda11_8.yml
conda activate ihuman
cd submodules
bash ./install.sh
bash ./install-cuda12_1.sh # or install-cuda11_8.sh
```

## Running the code

### Step 1: Download Dataset
a. download dataset from this link https://drive.google.com/file/d/1qwM1jdabiJFmEGywuYowKD0-nC2-rWVe/view?usp=share_link
<br>
b. place it in {root}/data/people_snapshot/
b. place it in {root}/data/peoplesnapshot/

### Step 2: Download Models
a. Download the SMPL v1.1 `SMPL_python_v.1.1.0.zip` model from the [SMPL official website](https://smpl.is.tue.mpg.de/download.php) and move and rename `SMPL_python_v.1.1.0/smpl/models/*.pkl` to `PROJECT_ROOT/data/smplx_models/smpl/`.
a. Download the SMPL v1.1 `SMPL_python_v.1.1.0.zip` model from the [SMPL official website](https://smpl.is.tue.mpg.de/download.php) and move and rename `SMPL_python_v.1.1.0/smpl/models/*.pkl` to `PROJECT_ROOT/data/smpl/models`.

After this the project folder should look like this:
```
PROJECT_ROOT/data/smpl_model
PROJECT_ROOT/data/smpl/models
├── SMPL_FEMALE.pkl
├── SMPL_MALE.pkl
├── SMPL_NEUTRAL.pkl


```

b. Download the files from this drive link (https://drive.google.com/file/d/17OdyNkfdFKFqBnmFMZtmT9B-6AXKAZeG/view?usp=share_link) and place them in `PROJECT_ROOT/data/smpl/small/`.
Expand Down
170 changes: 88 additions & 82 deletions animatableGaussian/model/nerf_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,8 @@

from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer

import yaml


class EvaluatorRecon(nn.Module):
"""adapted from https://github.com/JanaldoChen/Anim-NeRF/blob/main/models/evaluator.py"""
Expand Down Expand Up @@ -88,7 +90,9 @@ def __init__(self, opt, datamodule=None):
if not os.path.exists("test"):
os.makedirs("test")
self.robustifier = GMoF(rho=5)

with open('settings.yaml', "r") as f:
config = yaml.safe_load(f)
self.headless=config[0]['training']['headless']
self.cal_test_metrics = opt.cal_test_metrics if 'cal_test_metrics' in opt else False

def forward(self, camera_params, model_param, time, render_point=False, train=True):
Expand Down Expand Up @@ -505,16 +509,16 @@ def validation_step(self, batch, batch_idx):
rasterized_rgbs = torch.stack(rasterized_rgbs)
b_vertices = torch.stack(b_vertices)
b_normals = torch.stack(b_normals)
if not self.headless:
from animatableGaussian.vis_utils import create_side_by_side_images
gt_rgb_vs_rasterized_rgb = create_side_by_side_images(gt_images=gt_images, pred_images=rasterized_rgbs)

from animatableGaussian.vis_utils import create_side_by_side_images
gt_rgb_vs_rasterized_rgb = create_side_by_side_images(gt_images=gt_images, pred_images=rasterized_rgbs)

save_image(gt_rgb_vs_rasterized_rgb * 255,
path=f"val/{self.current_epoch}/rasterized_{batch_idx}.png")
save_image(gt_rgb_vs_rasterized_rgb * 255,
path=f"val/{self.current_epoch}/rasterized_{batch_idx}.png")

gt_normal_vs_rasterized_normal = create_side_by_side_images(gt_images=gt_normals, pred_images=b_normals)
save_image(gt_normal_vs_rasterized_normal * 255,
path=f"val/{self.current_epoch}/normal_{batch_idx}.png")
gt_normal_vs_rasterized_normal = create_side_by_side_images(gt_images=gt_normals, pred_images=b_normals)
save_image(gt_normal_vs_rasterized_normal * 255,
path=f"val/{self.current_epoch}/normal_{batch_idx}.png")

## ===================================================

Expand Down Expand Up @@ -572,85 +576,87 @@ def validation_step(self, batch, batch_idx):
## ========================================== ###

# ==================== render model and image ===============
from animatableGaussian.vis_utils import render_model_to_image
if not self.headless:
from animatableGaussian.vis_utils import render_model_to_image

model_image_overlaps = []
for i, img_path in enumerate(batch['img_path']):
model_img_overlap = render_model_to_image(b_vertices[i].detach().cpu().unsqueeze(0).numpy(),
camera,
[img_path],
save_path=None)
model_image_overlaps.append(torch.from_numpy(model_img_overlap))
model_image_overlaps = []
for i, img_path in enumerate(batch['img_path']):
model_img_overlap = render_model_to_image(b_vertices[i].detach().cpu().unsqueeze(0).numpy(),
camera,
[img_path],
save_path=None)
model_image_overlaps.append(torch.from_numpy(model_img_overlap))

model_image_overlaps = torch.stack(model_image_overlaps).permute(0, 3, 1, 2)
model_image_overlaps = torch.stack(model_image_overlaps).permute(0, 3, 1, 2)

from animatableGaussian.vis_utils import make_grid
grid_model_image_overlap = make_grid(model_image_overlaps)
save_image(grid_model_image_overlap, f'val/{self.current_epoch}/model_over_rgb_{batch_idx}.png')
from animatableGaussian.vis_utils import make_grid
grid_model_image_overlap = make_grid(model_image_overlaps)
save_image(grid_model_image_overlap, f'val/{self.current_epoch}/model_over_rgb_{batch_idx}.png')

# =====
# model front side
from animatableGaussian.vis_utils import render_model_front_n_side
front_side_model_views = []
for i, img_path in enumerate(batch['img_path']):
front_side_model_view = render_model_front_n_side(b_vertices[i].detach().cpu().unsqueeze(0).numpy(),
camera)
front_side_model_views.append(torch.from_numpy(front_side_model_view))

front_side_model_views = torch.stack(front_side_model_views).permute(0, 3, 1, 2)

from animatableGaussian.vis_utils import make_grid
grid_front_side = make_grid(front_side_model_views)
save_image(grid_front_side, f'val/{self.current_epoch}/front_side_view_model_{batch_idx}.png')

# log to tensorflow
tensorboard = self.logger.experiment
if tensorboard:

tensorboard.add_image(f'rgb_reconstructed_{batch_idx}',
gt_rgb_vs_rasterized_rgb,
self.current_epoch, dataformats='HWC')

# if 'pose_2d' in batch:
# tensorboard.add_image(f'pose_{batch_idx}',
# pose_image_grid / 255.0,
# self.current_epoch, dataformats='HWC')

tensorboard.add_image(f'model_img_overlap_{batch_idx}',
grid_model_image_overlap / 255.0,
self.current_epoch, dataformats='HWC')

tensorboard.add_image(f'model_front_side_{batch_idx}',
grid_front_side / 255.0,
self.current_epoch, dataformats='HWC')

tensorboard.add_image(f'normal_{batch_idx}', gt_normal_vs_rasterized_normal, self.current_epoch,
dataformats="HWC")

# mesh visualization
camera_config = {'cls': 'PerspectiveCamera'}
verts = b_vertices.cpu().clone()
verts[:, :, 1] *= -1
verts -= verts.mean(1).unsqueeze(1)

faces = self.model.faces[None, ...]

pc = verts.clone()

# pred_gt_verts = verts.unsqueeze(0)
# append GT verts
if self.cal_test_metrics and 'gt_vertices' in batch.keys():
gt_verts = batch['gt_vertices'].detach().cpu()
gt_verts -= gt_verts.mean(1).unsqueeze(1)
gt_verts[:, :, 1] *= -1
gt_verts[:, :, 0] += 1
pc = torch.hstack([verts, gt_verts])

tensorboard.add_mesh(f'reconstructed_pc_{batch_idx}',
vertices=pc,
# faces=faces,
config_dict={"camera": camera_config},
global_step=self.current_epoch)
if not self.headless:
from animatableGaussian.vis_utils import render_model_front_n_side
front_side_model_views = []
for i, img_path in enumerate(batch['img_path']):
front_side_model_view = render_model_front_n_side(b_vertices[i].detach().cpu().unsqueeze(0).numpy(),
camera)
front_side_model_views.append(torch.from_numpy(front_side_model_view))

front_side_model_views = torch.stack(front_side_model_views).permute(0, 3, 1, 2)

from animatableGaussian.vis_utils import make_grid
grid_front_side = make_grid(front_side_model_views)
save_image(grid_front_side, f'val/{self.current_epoch}/front_side_view_model_{batch_idx}.png')

# log to tensorflow
tensorboard = self.logger.experiment
if tensorboard:

tensorboard.add_image(f'rgb_reconstructed_{batch_idx}',
gt_rgb_vs_rasterized_rgb,
self.current_epoch, dataformats='HWC')

# if 'pose_2d' in batch:
# tensorboard.add_image(f'pose_{batch_idx}',
# pose_image_grid / 255.0,
# self.current_epoch, dataformats='HWC')

tensorboard.add_image(f'model_img_overlap_{batch_idx}',
grid_model_image_overlap / 255.0,
self.current_epoch, dataformats='HWC')

tensorboard.add_image(f'model_front_side_{batch_idx}',
grid_front_side / 255.0,
self.current_epoch, dataformats='HWC')

tensorboard.add_image(f'normal_{batch_idx}', gt_normal_vs_rasterized_normal, self.current_epoch,
dataformats="HWC")

# mesh visualization
camera_config = {'cls': 'PerspectiveCamera'}
verts = b_vertices.cpu().clone()
verts[:, :, 1] *= -1
verts -= verts.mean(1).unsqueeze(1)

faces = self.model.faces[None, ...]

pc = verts.clone()

# pred_gt_verts = verts.unsqueeze(0)
# append GT verts
if self.cal_test_metrics and 'gt_vertices' in batch.keys():
gt_verts = batch['gt_vertices'].detach().cpu()
gt_verts -= gt_verts.mean(1).unsqueeze(1)
gt_verts[:, :, 1] *= -1
gt_verts[:, :, 0] += 1
pc = torch.hstack([verts, gt_verts])

tensorboard.add_mesh(f'reconstructed_pc_{batch_idx}',
vertices=pc,
# faces=faces,
config_dict={"camera": camera_config},
global_step=self.current_epoch)

@torch.no_grad()
def test_step(self, batch, batch_idx, *args, **kwargs):
Expand Down
20 changes: 12 additions & 8 deletions animate.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,10 @@
from animatableGaussian.model.nerf_model import NeRFModel

DEVICE = "cuda"
import yaml
with open('settings.yaml', "r") as f:
config = yaml.safe_load(f)
create_mesh=config[0]['inference']['create_mesh']


def load_mixamo_smpl(actions_dir, action_type='0007', skip=1):
Expand Down Expand Up @@ -104,16 +108,16 @@ def main(opt):
image = rgb.detach().cpu().permute(1, 2, 0).numpy() * 255.0
animations.append(image)
cv2.imwrite(img_path, image[:, :, ::-1])

verts_posed = vt
pred_faces = model.model.faces[0]
vertex_colors = model.model.get_vertex_colors()
mesh_o3d = too3dmesh(verts_posed, pred_faces, vertex_colors)
os.makedirs(f"{output_path}/mesh", exist_ok=True)
o3d.io.write_triangle_mesh(f"{output_path}/mesh/{idx}.obj", mesh_o3d)
if create_mesh:
verts_posed = vt
pred_faces = model.model.faces[0]
vertex_colors = model.model.get_vertex_colors()
mesh_o3d = too3dmesh(verts_posed, pred_faces, vertex_colors)
os.makedirs(f"{output_path}/mesh", exist_ok=True)
o3d.io.write_triangle_mesh(f"{output_path}/mesh/{idx}.obj", mesh_o3d)

animations = [np.asarray(animation, dtype=np.uint8) for animation in animations]
imageio.mimsave(f"{output_path}/training.gif", animations)
imageio.mimsave(f"{output_path}/training.gif", animations,fps=50)


if __name__ == "__main__":
Expand Down
File renamed without changes.
Loading