config_file: null # This lets the user pass a config filename to load other arguments from
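# Assumed merge order (based on the train.py comments below, not verified here):
# the defaults in this file, then the file named by config_file, then any
# command-line dotlist overrides, with later sources winning, e.g.
#   python train.py config_file=conf/landcoverai.yaml program.seed=42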
program: # These are the arguments that define how the train.py script works
  seed: 1337
  output_dir: output
  data_dir: data
  log_dir: logs
  overwrite: False
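  # overwrite presumably controls whether an existing output_dir/log_dir for the
  # experiment name may be reused/overwritten (an assumption from the flag name;
  # see train.py for the actual behavior)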
experiment: # These are arguments specific to the experiment we are running
  name: ??? # this is the name given to this experiment run
  task: ??? # this is the type of task to use for this experiment (e.g. "landcoverai")
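  # "???" is OmegaConf's mandatory-value marker: reading name or task before an
  # experiment config or the command line fills them in raises MissingMandatoryValue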
  module: # these will be passed as kwargs to the LightningModule associated with the task
    learning_rate: 1e-3
  datamodule: # these will be passed as kwargs to the LightningDataModule associated with the task
    root_dir: ${program.data_dir}
    seed: ${program.seed}
    batch_size: 32
    num_workers: 4
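    # The "${...}" values above are OmegaConf interpolations: root_dir and seed
    # resolve against the top-level program group at access time, so overriding
    # program.seed also changes the seed passed to the datamodule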
# The values below are taken from the Trainer defaults listed at
# https://pytorch-lightning.readthedocs.io/en/1.3.8/common/trainer.html#init
# This should probably be made into a structured-config schema, e.g. as shown at
# https://omegaconf.readthedocs.io/en/2.0_branch/structured_config.html#merging-with-other-configs
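# A minimal sketch of such a schema (assumed, not part of this repo), in Python:
#
#   from dataclasses import dataclass
#   from typing import Optional
#   from omegaconf import OmegaConf
#
#   @dataclass
#   class TrainerConf:  # hypothetical name; only a few fields shown
#       gpus: Optional[int] = None
#       max_epochs: Optional[int] = None
#       precision: int = 32
#
#   # Merging the YAML into the schema validates types and rejects unknown keys:
#   schema = OmegaConf.structured(TrainerConf)
#   trainer_conf = OmegaConf.merge(schema, OmegaConf.load("defaults.yaml").trainer)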
trainer: # These are the parameters passed to the PyTorch Lightning Trainer object
  logger: True
  checkpoint_callback: True
  callbacks: null
  default_root_dir: null
  gradient_clip_val: 0.0
  gradient_clip_algorithm: 'norm'
  process_position: 0
  num_nodes: 1
  num_processes: 1
  gpus: null
  auto_select_gpus: False
  tpu_cores: null
  log_gpu_memory: null
  progress_bar_refresh_rate: null
  overfit_batches: 0.0
  track_grad_norm: -1
  check_val_every_n_epoch: 1
  fast_dev_run: False
  accumulate_grad_batches: 1
  max_epochs: null
  min_epochs: null
  max_steps: null
  min_steps: null
  max_time: null
  limit_train_batches: 1.0
  limit_val_batches: 1.0
  limit_test_batches: 1.0
  limit_predict_batches: 1.0
  val_check_interval: 1.0
  flush_logs_every_n_steps: 100
  log_every_n_steps: 50
  accelerator: null
  sync_batchnorm: False
  precision: 32
  weights_summary: 'top'
  weights_save_path: null
  num_sanity_val_steps: 2
  resume_from_checkpoint: null
  profiler: null
  benchmark: False
  deterministic: False
  reload_dataloaders_every_epoch: False
  auto_lr_find: False
  replace_sampler_ddp: True
  terminate_on_nan: False
  auto_scale_batch_size: False
  prepare_data_per_node: True
  plugins: null
  amp_backend: 'native'
  move_metrics_to_cpu: False
  multiple_trainloader_mode: 'max_size_cycle'
  stochastic_weight_avg: False
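# Everything under trainer is expanded directly into the Trainer constructor,
# e.g. (assumed usage, matching the 1.3.8 API referenced above):
#   import pytorch_lightning as pl
#   from omegaconf import OmegaConf
#   trainer = pl.Trainer(**OmegaConf.to_container(conf.trainer))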