Commit e2b16e0: add copyright
jiaxi-jiang committed Mar 15, 2024 (1 parent: db45b29)
Showing 11 changed files with 108 additions and 34 deletions.
16 changes: 11 additions & 5 deletions data/dataset_amass.py
@@ -1,3 +1,14 @@
'''
# --------------------------------------------
# dataloader for AMASS dataset
# --------------------------------------------
# AvatarPoser: Articulated Full-Body Pose Tracking from Sparse Motion Sensing (ECCV 2022)
# https://github.com/eth-siplab/AvatarPoser
# Jiaxi Jiang ([email protected])
# Sensing, Interaction & Perception Lab,
# Department of Computer Science, ETH Zurich
'''

import torch
import numpy as np
import os
@@ -7,17 +18,12 @@
from human_body_prior.tools.omni_tools import copy2cpu as c2c
from human_body_prior.tools.rotation_tools import aa2matrot,matrot2aa,local2global_pose
import random
from utils import utils_transform

from scipy import signal

import glob
from IPython import embed
import time
import copy
import pickle


class AMASS_Dataset(Dataset):
"""Motion Capture dataset"""

10 changes: 10 additions & 0 deletions data/select_dataset.py
@@ -1,3 +1,13 @@
'''
# --------------------------------------------
# define dataset
# --------------------------------------------
# AvatarPoser: Articulated Full-Body Pose Tracking from Sparse Motion Sensing (ECCV 2022)
# https://github.com/eth-siplab/AvatarPoser
# Jiaxi Jiang ([email protected])
# Sensing, Interaction & Perception Lab,
# Department of Computer Science, ETH Zurich
'''
def define_Dataset(dataset_opt):
dataset_type = dataset_opt['dataset_type'].lower()

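For orientation, data/select_dataset.py gains only the banner shown above; its define_Dataset factory keys on dataset_opt['dataset_type']. The following is a hypothetical sketch of such a factory, not the repository's exact code: the 'amass' key, the AMASS_Dataset import, and the error message are assumptions based on the files touched by this commit.

```python
# Hypothetical sketch of a dataset factory keyed on dataset_opt['dataset_type'].
# Only AMASS_Dataset (data/dataset_amass.py) appears in this commit; the 'amass'
# key and the error message are assumptions.
def define_Dataset(dataset_opt):
    dataset_type = dataset_opt['dataset_type'].lower()

    if dataset_type == 'amass':
        from data.dataset_amass import AMASS_Dataset as D
    else:
        raise NotImplementedError('Dataset [{:s}] is not recognized.'.format(dataset_type))

    return D(dataset_opt)   # instantiate the selected dataset with its option dict
```

A caller would pass the dataset block of the parsed options, e.g. define_Dataset(opt['datasets']['train']), though that key layout is likewise an assumption.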
17 changes: 11 additions & 6 deletions main_test_avatarposer.py
@@ -1,21 +1,26 @@
'''
# --------------------------------------------
# main testing code
# --------------------------------------------
# AvatarPoser: Articulated Full-Body Pose Tracking from Sparse Motion Sensing (ECCV 2022)
# https://github.com/eth-siplab/AvatarPoser
# Jiaxi Jiang ([email protected])
# Sensing, Interaction & Perception Lab,
# Department of Computer Science, ETH Zurich
'''

import os.path
import math
import argparse
import random
import numpy as np
from collections import OrderedDict
import logging
import torch
from torch.utils.data import DataLoader
from utils import utils_logger
from utils import utils_option as option
from data.select_dataset import define_Dataset
from models.select_model import define_Model
from utils import utils_transform
import pickle
from utils import utils_visualize as vis


save_animation = False
resolution = (800,800)

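The body of main_test_avatarposer.py is collapsed in this diff; only the header and imports are visible. Purely as a sketch of how those imports usually fit together in a test driver, and not the actual testing code, one might wire them up as below; the option-file path, the option keys, and the model's load/feed_data/test method names are all assumptions.

```python
# Sketch only: how the imports above typically come together in a test driver.
# The option path, option keys, and model method names are assumptions.
from torch.utils.data import DataLoader

from utils import utils_option as option
from data.select_dataset import define_Dataset
from models.select_model import define_Model


def main(json_path='options/test_avatarposer.json'):    # assumed option file
    opt = option.parse(json_path, is_train=False)        # assumed parse() signature
    test_set = define_Dataset(opt['datasets']['test'])   # assumed key layout
    test_loader = DataLoader(test_set, batch_size=1, shuffle=False)

    model = define_Model(opt)   # build the network from the options
    model.load()                # assumed: restore pretrained weights

    for test_data in test_loader:
        model.feed_data(test_data)   # assumed interface: push sparse tracking input
        model.test()                 # assumed interface: forward pass for evaluation


if __name__ == '__main__':
    main()
```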
12 changes: 11 additions & 1 deletion main_train_avatarposer.py
@@ -1,3 +1,14 @@
'''
# --------------------------------------------
# main training code
# --------------------------------------------
# AvatarPoser: Articulated Full-Body Pose Tracking from Sparse Motion Sensing (ECCV 2022)
# https://github.com/eth-siplab/AvatarPoser
# Jiaxi Jiang ([email protected])
# Sensing, Interaction & Perception Lab,
# Department of Computer Science, ETH Zurich
'''

import os.path
import math
import argparse
@@ -15,7 +26,6 @@
import pickle
from utils import utils_visualize as vis


save_animation = False
resolution = (800,800)

Binary file removed models/.emformer.py.swp
11 changes: 11 additions & 0 deletions models/model_avatarposer.py
@@ -1,3 +1,14 @@
'''
# --------------------------------------------
# code for model optimization and testing
# --------------------------------------------
# AvatarPoser: Articulated Full-Body Pose Tracking from Sparse Motion Sensing (ECCV 2022)
# https://github.com/eth-siplab/AvatarPoser
# Jiaxi Jiang ([email protected])
# Sensing, Interaction & Perception Lab,
# Department of Computer Science, ETH Zurich
'''

from collections import OrderedDict
import torch
import torch.nn as nn
11 changes: 10 additions & 1 deletion models/network.py
@@ -1,10 +1,19 @@
'''
# --------------------------------------------
# network architecture
# --------------------------------------------
# AvatarPoser: Articulated Full-Body Pose Tracking from Sparse Motion Sensing (ECCV 2022)
# https://github.com/eth-siplab/AvatarPoser
# Jiaxi Jiang ([email protected])
# Sensing, Interaction & Perception Lab,
# Department of Computer Science, ETH Zurich
'''
import torch
import torch.nn as nn
from IPython import embed
import math
from utils import utils_transform


nn.Module.dump_patches = True


18 changes: 11 additions & 7 deletions models/select_model.py
@@ -1,17 +1,21 @@
'''
# --------------------------------------------
# define training model
# --------------------------------------------
# AvatarPoser: Articulated Full-Body Pose Tracking from Sparse Motion Sensing (ECCV 2022)
# https://github.com/eth-siplab/AvatarPoser
# Jiaxi Jiang ([email protected])
# Sensing, Interaction & Perception Lab,
# Department of Computer Science, ETH Zurich
'''

import functools
import torch
from torch.nn import init
from human_body_prior.body_model.body_model import BodyModel
import os


"""
# --------------------------------------------
# define training model
# --------------------------------------------
"""


def define_Model(opt):
model = opt['model'] # one input: L

24 changes: 10 additions & 14 deletions prepare_data.py
@@ -1,19 +1,22 @@
'''
# --------------------------------------------
# data preprocessing for AMASS dataset
# --------------------------------------------
# AvatarPoser: Articulated Full-Body Pose Tracking from Sparse Motion Sensing (ECCV 2022)
# https://github.com/eth-siplab/AvatarPoser
# Jiaxi Jiang ([email protected])
# Sensing, Interaction & Perception Lab,
# Department of Computer Science, ETH Zurich
'''
import torch
import numpy as np
import os
import time
from torch.utils.data import Dataset, DataLoader
from human_body_prior.body_model.body_model import BodyModel
from human_body_prior.tools.omni_tools import copy2cpu as c2c
from human_body_prior.tools.rotation_tools import aa2matrot,matrot2aa,local2global_pose
from utils import utils_transform
import glob
from IPython import embed
import time
import copy
import pickle


dataroot_amass ="amass" # root of amass dataset

for dataroot_subset in ["MPI_HDM05", "BioMotionLab_NTroje", "CMU"]:
@@ -51,7 +54,6 @@
bm_male = BodyModel(bm_fname=bm_fname_male, num_betas=num_betas, num_dmpls=num_dmpls, dmpl_fname=dmpl_fname_male)#.to(comp_device)
bm_female = BodyModel(bm_fname=bm_fname_female, num_betas=num_betas, num_dmpls=num_dmpls, dmpl_fname=dmpl_fname_female)

#embed()
idx = 0
for filepath in filepaths:
data = dict()
@@ -68,7 +70,6 @@
# else:
idx+=1
print(idx)
# embed()

if framerate == 120:
stride = 2
@@ -81,19 +82,16 @@

bm = bm_male# if subject_gender == 'male' else bm_female

# embed()
body_parms = {
'root_orient': torch.Tensor(bdata_poses[:, :3]),#.to(comp_device), # controls the global root orientation
'pose_body': torch.Tensor(bdata_poses[:, 3:66]),#.to(comp_device), # controls the body
'trans': torch.Tensor(bdata_trans),#.to(comp_device), # controls the global body position
}

body_parms_list = body_parms
# embed()

body_pose_world=bm(**{k:v for k,v in body_parms.items() if k in ['pose_body','root_orient','trans']})

# embed()
# self.rotation_local_full_gt_list.append(body_parms['pose_body'])
# self.rotation_local_full_gt_list.append(torch.Tensor(bdata['poses'][:, :66]))
output_aa = torch.Tensor(bdata_poses[:, :66]).reshape(-1,3)
@@ -120,7 +118,6 @@
head_global_trans[:,:3,:3] = head_rotation_global_matrot.squeeze()
head_global_trans[:,:3,3] = position_global_full_gt_world[:,15,:]

# embed()
head_global_trans_list = head_global_trans[1:]


@@ -155,6 +152,5 @@
data['filepath'] = filepath


#embed()
with open(os.path.join(savedir,'{}.pkl'.format(idx)), 'wb') as f:
pickle.dump(data, f)
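Two steps of prepare_data.py are visible in the fragments above: sequences are temporally downsampled with a framerate-dependent stride, and the global head pose is packed into a 4x4 homogeneous transform whose rotation block comes from the global head rotation and whose translation column is the position of joint 15 (the head). The sketch below reproduces just those two steps with illustrative tensor names and shapes; the handling of framerates other than 120 fps is an assumption.

```python
# Standalone sketch of two steps visible above: framerate-dependent temporal
# downsampling and packing global head pose into 4x4 homogeneous transforms.
# Tensor names, shapes, and the non-120-fps branch are illustrative assumptions.
import torch


def downsample_stride(framerate):
    # Thin 120 fps captures to 60 fps; pass other rates through unchanged (assumption).
    return 2 if framerate == 120 else 1


def head_transforms(head_rot_matrot, head_pos):
    """head_rot_matrot: (T, 3, 3) global head rotations;
    head_pos: (T, 3) global head positions (joint 15)."""
    T = head_rot_matrot.shape[0]
    trans = torch.eye(4).repeat(T, 1, 1)   # (T, 4, 4) identity transforms
    trans[:, :3, :3] = head_rot_matrot     # rotation block
    trans[:, :3, 3] = head_pos             # translation column
    return trans


# Example: 240 frames captured at 120 fps are thinned to 120 frames.
stride = downsample_stride(120)
rots = torch.eye(3).repeat(240, 1, 1)[::stride]
poss = torch.zeros(240, 3)[::stride]
print(head_transforms(rots, poss).shape)   # torch.Size([120, 4, 4])
```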
11 changes: 11 additions & 0 deletions utils/utils_transform.py
@@ -1,3 +1,14 @@
'''
# --------------------------------------------
# utility functions for 3D transformation
# --------------------------------------------
# AvatarPoser: Articulated Full-Body Pose Tracking from Sparse Motion Sensing (ECCV 2022)
# https://github.com/eth-siplab/AvatarPoser
# Jiaxi Jiang ([email protected])
# Sensing, Interaction & Perception Lab,
# Department of Computer Science, ETH Zurich
'''

import numpy as np
from torch.nn import functional as F
from human_body_prior.tools import tgm_conversion as tgm
12 changes: 12 additions & 0 deletions utils/utils_visualize.py
@@ -1,3 +1,14 @@
'''
# --------------------------------------------
# visualization code
# --------------------------------------------
# AvatarPoser: Articulated Full-Body Pose Tracking from Sparse Motion Sensing (ECCV 2022)
# https://github.com/eth-siplab/AvatarPoser
# Jiaxi Jiang ([email protected])
# Sensing, Interaction & Perception Lab,
# Department of Computer Science, ETH Zurich
'''

import torch
import cv2
import os
@@ -11,6 +22,7 @@
import trimesh.util as util
from psbody.mesh import Mesh


os.environ['PYOPENGL_PLATFORM'] = 'egl'

"""
