Commit

Merge branch 'main' into upgrade-torch
CharlesGaydon authored Jan 3, 2024
2 parents 86e4977 + 0d13399 commit 11eaaa1
Showing 2 changed files with 20 additions and 1 deletion.
18 changes: 18 additions & 0 deletions configs/experiment/RandLaNet_base_run_FR-2x3GPUs.yaml
@@ -0,0 +1,18 @@
# @package _global_
defaults:
  - RandLaNet_base_run_FR.yaml

logger:
  comet:
    experiment_name: "RandLaNet_base_run_FR-2x3GPUs"


# 2 nodes x 3 GPUs - No gradient accumulation.
# This is equivalent to training with 2 GPUs with gradients accumulated 3 times.
# Setting precision=16 did not bring any speed improvement for Lidar HD data and RandLa-Net model.
trainer:
  strategy: ddp_find_unused_parameters_false
  accelerator: gpu
  num_nodes: 2
  devices: 3
  accumulate_grad_batches: 1
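
For context (not part of the commit): the trainer block above uses PyTorch Lightning Trainer argument names, so a minimal sketch of the equivalent Trainer call, plus the effective-batch-size arithmetic behind the "equivalent to 2 GPUs with gradients accumulated 3 times" comment, could look as follows. This assumes a Lightning 1.x release where the "ddp_find_unused_parameters_false" strategy alias is registered and uses a hypothetical per-GPU batch size B that is not set in these configs.

# Sketch only: how the `trainer:` block could map onto a PyTorch Lightning
# Trainer (assumption: Lightning 1.x, where this strategy alias exists).
from pytorch_lightning import Trainer

trainer = Trainer(
    strategy="ddp_find_unused_parameters_false",  # DDP, skipping unused-parameter detection
    accelerator="gpu",
    num_nodes=2,                # 2 nodes ...
    devices=3,                  # ... with 3 GPUs each -> 6 processes in total
    accumulate_grad_batches=1,  # no gradient accumulation
)

# Effective batch size behind the comment's equivalence claim, with a
# hypothetical per-GPU batch size B (not set in these configs):
B = 16
six_gpus_no_accumulation = 2 * 3 * 1 * B    # 2 nodes * 3 GPUs * 1 step
two_gpus_with_accumulation = 1 * 2 * 3 * B  # 1 node * 2 GPUs * 3 accumulated steps
assert six_gpus_no_accumulation == two_gpus_with_accumulation  # both see 6 * B samples per optimizer step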
3 changes: 2 additions & 1 deletion configs/experiment/RandLaNet_base_run_FR-MultiGPU.yaml
@@ -4,9 +4,10 @@ defaults:

logger:
  comet:
-    experiment_name: "Pyg RandLaNet - FR Data - 2xGPUs"
+    experiment_name: "RandLaNet_base_run_FR-2xGPUs"

trainer:
+  strategy: ddp_find_unused_parameters_false
  # Replace by cpu to simulate multi-cpus training.
  accelerator: gpu
  devices: 2
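
The retained comment in this file notes that the accelerator can be switched to cpu to simulate multi-device training without GPUs. A hedged sketch of that variant (not part of the commit, same Lightning 1.x assumption as above):

# Hypothetical CPU variant of the 2-device config, useful for exercising the
# DDP code path on a machine without GPUs (assumption: Lightning 1.x API).
from pytorch_lightning import Trainer

cpu_trainer = Trainer(
    strategy="ddp_find_unused_parameters_false",
    accelerator="cpu",  # swap "gpu" for "cpu", as the config comment suggests
    devices=2,          # 2 CPU processes instead of 2 GPUs
)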
