Commit: sync
yanggao2000 committed Nov 6, 2023
1 parent 47271ca commit 1fc76f5
Showing 189 changed files with 1,203 additions and 24,512 deletions.
Binary file added .DS_Store
Binary file not shown.
12 changes: 8 additions & 4 deletions README.md
@@ -1,13 +1,17 @@
# JRDB-Traj
JRDB Data Preprocessing and Trajectory Prediction Baselines

## Prerequisites
Install requirements with `bash requirement.sh`.

## Repository Overview
The pipeline encompasses three key steps:
The pipeline encompasses four key steps:

1. `python traj_extractor.py`: This script preprocesses the JRDB dataset, extracting trajectories for further analysis.
2. `bash traj_categorize.sh`: Utilizing the TrajNet++ benchmark, this script categorizes '.csv' files and generates '.ndjson' files for the next step.
1. `bash dataload.sh`: This script preprocesses the JRDB dataset, extracting trajectories for further analysis.
2. `bash preprocess.sh`: Utilizing the TrajNet++ benchmark, this script categorizes '.csv' files and generates '.ndjson' files for the next step.
3. `bash train.sh`: This script trains baseline trajectory prediction models on the prepared data.
4. `bash eval.sh`: This script generates predictions in the JRDB leaderboard format (an end-to-end run is sketched below).
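A minimal end-to-end run, assuming all four scripts sit in the repository root and `dataload.sh` has been pointed at your local JRDB paths (a sketch, not verbatim from this commit):

```bash
bash dataload.sh    # extract trajectories from the raw JRDB labels
bash preprocess.sh  # categorize .csv files into TrajNet++ .ndjson files
bash train.sh       # train the baseline predictors
bash eval.sh        # write predictions in JRDB leaderboard format
```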


## Work in Progress
This repository is being updated so stay tuned!
This repository is being updated, so please stay tuned!
15 changes: 15 additions & 0 deletions dataload.sh
@@ -0,0 +1,15 @@
# After extraction, the trajectory data will be located at the out_path you set.
jrdb_path="/data2/saeed-data/jrdb/train_dataset/labels/"
jrdb_test_path=".../test_trackings/"
out_path="OUT_tmp"
python train_traj_extractor.py --out_path $out_path --jrdb_path $jrdb_path
python test_traj_extractor.py --out_path $out_path --jrdb_path $jrdb_test_path
# Two temporary folders, 'temp' and 'conf_temp', are also created; they can be removed.
rm -r $out_path/temp $out_path/conf_temp
# Move the extracted data to 'trajnetplusplusdataset/data/raw/'.
mv $out_path trajnetplusplusdataset/data/raw/
2 changes: 2 additions & 0 deletions eval.sh
@@ -0,0 +1,2 @@
cd jrdb_baselines
python -m trajnetbaselines.lstm.trajnet_evaluator --path jrdb_traj_with_nan --output OUTPUT_BLOCK/jrdb_traj_with_nan/lstm_social_baseline.pkl
Binary file added jrdb_baselines/.DS_Store
Binary file not shown.
File renamed without changes.
File renamed without changes.
Binary file not shown.
@@ -1,8 +1,14 @@
# import pickle
# import numpy as np
import json
import pickle

import numpy as np
import trajnetplusplustools
import sys, os
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)

from trajnetbaselines.lstm.tools.reader import Reader

def load_test_datasets(dataset, goal_flag, args):
"""Load Test Prediction file with goals (optional)"""
@@ -11,13 +17,10 @@ def load_test_datasets(dataset, goal_flag, args):
print('Dataset Name: ', dataset_name)

# Read Scenes from 'test' folder
reader = trajnetplusplustools.Reader(args.path.replace('_pred', '') + dataset + '.ndjson', scene_type='paths')
reader = Reader(args.path.replace('_pred', '') + dataset + '.ndjson', scene_type='paths')
## Necessary modification of train scene to add filename (for goals)
scenes = [(dataset, s_id, s) for s_id, s in reader.scenes()]

## Consider goals
## Goal file must be present in 'goal_files/test_private' folder
## Goal file must have the same name as corresponding test file
if goal_flag:
print("Loading Goal File: ", 'goal_files/test_private/' + dataset +'.pkl')
goal_dict = pickle.load(open('goal_files/test_private/' + dataset +'.pkl', "rb"))
@@ -40,43 +43,46 @@ def preprocess_test(scene, obs_len):
return scene


def write_predictions(pred_list, scenes, model_name, dataset_name, args):
def write_predictions(pred_list, scenes, model_name, dataset_name, dataset_index, args):
"""Write predictions corresponding to the scenes in the respective file"""
seq_length = args.obs_length + args.pred_length
with open(args.path + '{}/{}'.format(model_name, dataset_name), "a") as myfile:
dataset_name = dataset_name.replace('.ndjson','_temp.txt')

path_temp = args.path+model_name+'/'+'temp/'+dataset_name
path_pred = args.path+model_name+'/'+'jrdb_submission/'

with open(path_temp, "a") as myfile:
## Write All Predictions
for (predictions, (_, scene_id, paths)) in zip(pred_list, scenes):
## Extract 1) first_frame, 2) frame_diff 3) ped_ids for writing predictions
observed_path = paths[0]
frame_diff = observed_path[1].frame - observed_path[0].frame
first_frame = observed_path[args.obs_length-1].frame + frame_diff
ped_id = observed_path[0].pedestrian
ped_id_ = []
for j, _ in enumerate(paths[1:]): ## Only need neighbour ids
ped_id_.append(paths[j+1][0].pedestrian)

## Write SceneRow
scenerow = trajnetplusplustools.SceneRow(scene_id, ped_id, observed_path[0].frame,
observed_path[0].frame + (seq_length - 1) * frame_diff, 2.5, 0)
# scenerow = trajnetplusplustools.SceneRow(scenerow.scene, scenerow.pedestrian, scenerow.start, scenerow.end, 2.5, 0)
myfile.write(trajnetplusplustools.writers.trajnet(scenerow))
myfile.write('\n')


for m in range(len(predictions)):
prediction, neigh_predictions = predictions[m]
prediction, _ = predictions[m]
## Write Primary
for i in range(len(prediction)):
track = trajnetplusplustools.TrackRow(first_frame + i * frame_diff, ped_id,
prediction[i, 0].item(), prediction[i, 1].item(), m, scene_id)
myfile.write(trajnetplusplustools.writers.trajnet(track))

data = [first_frame + i * frame_diff, ped_id, prediction[i, 0].item(), prediction[i, 1].item(), prediction[i, 2].item()]
for d in data:
myfile.write(str(d))
myfile.write(' ')
myfile.write('\n')

## Write Neighbours (if non-empty)
if len(neigh_predictions):
for n in range(neigh_predictions.shape[1]):
neigh = neigh_predictions[:, n]
for j in range(len(neigh)):
track = trajnetplusplustools.TrackRow(first_frame + j * frame_diff, ped_id_[n],
neigh[j, 0].item(), neigh[j, 1].item(), m, scene_id)
myfile.write(trajnetplusplustools.writers.trajnet(track))
myfile.write('\n')
txt_name = path_temp

trajs = np.loadtxt(txt_name, dtype=str)
trajs = trajs.astype(float)  # np.float is deprecated/removed in recent NumPy; use the builtin float
with open(path_pred+ str(dataset_index).zfill(4)+'.txt', 'a') as txtfile:
for pred_id in range(12):
for row_id in range(trajs.shape[0]):
if trajs[row_id,0] == trajs[pred_id,0]:

data_final = [int(trajs[row_id, 0]), int(trajs[row_id, 1]), 'Pedestrian', 0, 0, -1, -1, -1, -1, -1, trajs[row_id, 2], trajs[row_id, 3]]

for d_final in data_final:
txtfile.write(str(d_final))
txtfile.write(' ')
txtfile.write('\n')

File renamed without changes.
4 changes: 0 additions & 4 deletions train/setup.py → jrdb_baselines/setup.py
@@ -15,10 +15,6 @@
],
license='MIT',
description='Trajnet baselines.',
long_description=open('README.rst').read(),
author='Sven Kreiss',
author_email='[email protected]',
url='https://github.com/svenkreiss/trajnetbaselines',

install_requires=[
'numpy',
Binary file added jrdb_baselines/trajnetbaselines/.DS_Store
Binary file not shown.
@@ -1,4 +1,4 @@
__version__ = '0.1.0'

from . import augmentation
from . import lstm
from . import lstm
File renamed without changes.
File renamed without changes.
File renamed without changes.
@@ -1,4 +1,4 @@
from .loss import PredictionLoss, L2Loss
from .loss import L2Loss
from .lstm import LSTM, LSTMPredictor
from .gridbased_pooling import GridBasedPooling
from .non_gridbased_pooling import NN_Pooling, HiddenStateMLPPooling, AttentionMLPPooling, DirectionalMLPPooling
21 binary files changed but not shown.
@@ -1,4 +1,5 @@
import trajnetplusplustools
# import trajnetplusplustools
from .tools.reader import Reader
import os
import pickle

@@ -44,7 +45,7 @@ def prepare_data(path, subset='/train/', sample=1.0, goals=True):
files = [f.split('.')[-2] for f in os.listdir(path + subset) if f.endswith('.ndjson')]
## Iterate over file names
for file in files:
reader = trajnetplusplustools.Reader(path + subset + file + '.ndjson', scene_type='paths')
reader = Reader(path + subset + file + '.ndjson', scene_type='paths')
## Necessary modification of train scene to add filename
scene = [(file, s_id, s) for s_id, s in reader.scenes(sample=sample)]
if goals:
@@ -1,9 +1,4 @@
from collections import defaultdict
import os

import numpy as np
import matplotlib.pyplot as plt

import torch

def one_cold(i, n):
@@ -172,10 +167,8 @@ def directional(self, obs1, obs2):
## Generate values to input in directional grid tensor (relative velocities in this case)
vel = obs2 - obs1
unfolded = vel.unsqueeze(0).repeat(vel.size(0), 1, 1)
## [num_tracks, 2] --> [num_tracks, num_tracks, 2]
relative = unfolded - vel.unsqueeze(1)
## Deleting Diagonal (Ped wrt itself)
## [num_tracks, num_tracks, 2] --> [num_tracks, num_tracks-1, 2]
relative = relative[~torch.eye(num_tracks).bool()].reshape(num_tracks, num_tracks-1, 2)

## Generate Occupancy Map
@@ -185,13 +178,11 @@ def social(self, hidden_state, obs1, obs2):
## Makes the Social Grid

num_tracks = obs2.size(0)

## if only primary pedestrian present
if num_tracks == 1:
return self.occupancy(obs2, None, past_obs=obs1)

## Generate values to input in hiddenstate grid tensor (compressed hidden-states in this case)
## [num_tracks, hidden_dim] --> [num_tracks, num_tracks-1, pooling_dim]
hidden_state_grid = hidden_state.repeat(num_tracks, 1).view(num_tracks, num_tracks, -1)
hidden_state_grid = hidden_state_grid[~torch.eye(num_tracks).bool()].reshape(num_tracks, num_tracks-1, -1)
hidden_state_grid = self.hidden_dim_encoding(hidden_state_grid)
@@ -211,14 +202,11 @@ def dir_social(self, hidden_state, obs1, obs2):
## Generate values to input in directional grid tensor (relative velocities in this case)
vel = obs2 - obs1
unfolded = vel.unsqueeze(0).repeat(vel.size(0), 1, 1)
## [num_tracks, 2] --> [num_tracks, num_tracks, 2]
relative = unfolded - vel.unsqueeze(1)
## Deleting Diagonal (Ped wrt itself)
## [num_tracks, num_tracks, 2] --> [num_tracks, num_tracks-1, 2]
relative = relative[~torch.eye(num_tracks).bool()].reshape(num_tracks, num_tracks-1, 2)

## Generate values to input in hiddenstate grid tensor (compressed hidden-states in this case)
## [num_tracks, hidden_dim] --> [num_tracks, num_tracks-1, pooling_dim]
hidden_state_grid = hidden_state.repeat(num_tracks, 1).view(num_tracks, num_tracks, -1)
hidden_state_grid = hidden_state_grid[~torch.eye(num_tracks).bool()].reshape(num_tracks, num_tracks-1, -1)
hidden_state_grid = self.hidden_dim_encoding(hidden_state_grid)
@@ -236,7 +224,6 @@ def normalize(relative, obs, past_obs):
theta = (np.pi / 2) - velocity
ct = torch.cos(theta)
st = torch.sin(theta)
## Cleaner?
relative = torch.stack([torch.einsum('tc,ci->ti', pos_instance, torch.Tensor([[ct[i], st[i]], [-st[i], ct[i]]])) for
i, pos_instance in enumerate(relative)], dim=0)
return relative
@@ -269,12 +256,11 @@ def occupancy(self, obs, other_values=None, past_obs=None):
return self.constant*torch.ones(1, self.pooling_dim, self.n, self.n, device=obs.device)

## Get relative position
## [num_tracks, 2] --> [num_tracks, num_tracks, 2]
unfolded = obs.unsqueeze(0).repeat(obs.size(0), 1, 1)
relative = unfolded - obs.unsqueeze(1)
relative = unfolded
relative[:,:,:2] -= obs.unsqueeze(1)[:,:,:2]
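## assumption: obs now carries a third channel (e.g. a visibility flag) that stays absolute; only x-y are made relative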
## Deleting Diagonal (Ped wrt itself)
## [num_tracks, num_tracks, 2] --> [num_tracks, num_tracks-1, 2]
relative = relative[~torch.eye(num_tracks).bool()].reshape(num_tracks, num_tracks-1, 2)
relative = relative[~torch.eye(num_tracks).bool()].reshape(num_tracks, num_tracks-1, 3)

## In case of 'occupancy' pooling
if other_values is None:
@@ -314,7 +300,6 @@ def occupancy(self, obs, other_values=None, past_obs=None):
occ_2d, self.blur_size, 1, int(self.blur_size / 2), count_include_pad=True)

occ_summed = torch.nn.functional.lp_pool2d(occ_blurred, 1, self.pool_size)
# occ_summed = torch.nn.functional.avg_pool2d(occ_blurred, self.pool_size) # faster?
return occ_summed

## Architectures of Encoding Grid
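The pooling functions above repeatedly drop each pedestrian's entry with respect to itself using a boolean eye mask. A minimal standalone sketch of that pattern (hypothetical shapes, not code from this file):

```python
import torch

n = 3  # number of tracks
rel = torch.randn(n, n, 2)  # pairwise values; the diagonal is each track vs. itself
# delete the diagonal: [n, n, 2] -> [n, n-1, 2]
rel = rel[~torch.eye(n).bool()].reshape(n, n - 1, 2)
```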
59 changes: 59 additions & 0 deletions jrdb_baselines/trajnetbaselines/lstm/loss.py
@@ -0,0 +1,59 @@
import torch

class L2Loss(torch.nn.Module):
"""L2 Loss (deterministic version of PredictionLoss)
This Loss penalizes only the primary trajectories
"""
def __init__(self, keep_batch_dim=False):
super(L2Loss, self).__init__()
self.loss = torch.nn.MSELoss(reduction='none')
self.keep_batch_dim = keep_batch_dim
self.loss_multiplier = 100

def col_loss(self, primary, neighbours, batch_split, gamma=2.0):
"""
Penalizes model when primary pedestrian prediction comes close
to the neighbour predictions
primary: Tensor [pred_length, 1, 2]
neighbours: Tensor [pred_length, num_neighbours, 2]
"""

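# NaN != NaN, so this replaces missing neighbour coordinates with a far-away sentinel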
neighbours[neighbours != neighbours] = -1000
exponential_loss = 0.0
for (start, end) in zip(batch_split[:-1], batch_split[1:]):
batch_primary = primary[:, start:start+1]
batch_neigh = neighbours[:, start:end]
distance_to_neigh = torch.norm(batch_neigh - batch_primary, dim=2)
mask_far = (distance_to_neigh < 0.25).detach()
distance_to_neigh = -gamma * distance_to_neigh * mask_far
exponential_loss += distance_to_neigh.exp().sum()
return exponential_loss.sum()

def forward(self, inputs, targets, batch_split):
## Extract primary pedestrians
targets = targets.transpose(0, 1)
targets = targets[batch_split[:-1]]
targets = targets.transpose(0, 1)
inputs = inputs.transpose(0, 1)
inputs = inputs[batch_split[:-1]]
inputs = inputs.transpose(0, 1)

mask_gt = ~torch.isnan(targets[:,:,0])
mask_pred = ~torch.isnan(inputs[:,:,0])
mask = mask_pred*mask_gt

loss_vis = self.loss(inputs[mask], targets[mask])
if inputs[~mask].size(0) == 0:
loss = loss_vis
else:
loss_invis = self.loss(inputs[~mask][:,-1], targets[~mask][:,-1])
loss_invis = torch.cat((loss_invis.unsqueeze(1),torch.zeros(loss_invis.size(0),2).to(loss_invis.device)),dim=1)
loss = torch.cat((loss_vis, loss_invis),dim=0)

## Used in variety loss (SGAN)
if self.keep_batch_dim:
return loss.mean(dim=0).mean(dim=1) * self.loss_multiplier

return torch.mean(loss) * self.loss_multiplier
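A minimal usage sketch of `L2Loss` (illustrative shapes and values; the import path is assumed from this repo's layout). Missing ground truth is NaN only in the x-y channels here, so the unobserved steps are supervised through the last channel alone:

```python
import torch
from trajnetbaselines.lstm.loss import L2Loss  # assumed import path

pred_length, num_tracks = 12, 4
inputs = torch.randn(pred_length, num_tracks, 3)   # predicted x, y, last channel
targets = torch.randn(pred_length, num_tracks, 3)
targets[5:, 0, :2] = float('nan')  # primary track unobserved after step 5 (x-y only)

batch_split = torch.tensor([0, num_tracks])  # one scene; track 0 is its primary
criterion = L2Loss()
loss = criterion(inputs, targets, batch_split)  # scalar, scaled by loss_multiplier
```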
