# defaults.yaml (from a fork of Lightning-AI/deep-learning-project-template)
hydra:
  run:
    # Configure the output dir of each experiment programmatically from the arguments
    # Example: "outputs/mnist/classifier/baseline/2021-03-10_141516"
    dir: outputs/${data.name}/${model.name}/${experiment}/${now:%Y-%m-%d_%H%M%S}
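
# A minimal sketch of how these values are set from the command line at launch;
# the entry-point name (train.py) and the override value lr_sweep are assumptions,
# not defined by this file:
#   python train.py experiment=lr_sweep data=mnist model=classifier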
# Global configurations shared between different modules
experiment: baseline
# Compose the nested config from the defaults list
defaults:
  - data: mnist # Path to the sub-config; the .yaml extension can be omitted
  - model: classifier.yaml # I add the full path for easy navigation in vim (cursor on the path, press gf)
  - override hydra/job_logging: colorlog
  - override hydra/hydra_logging: colorlog
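
# For reference, a sketch of what a sub-config such as conf/data/mnist.yaml could
# contain; the directory layout and the fields below are assumptions, except that
# data.name and model.name must exist for the hydra.run.dir interpolation above:
#   name: mnist
#   batch_size: 64
#   num_workers: 4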
# PyTorch Lightning Trainer's arguments
# Default flags are commented out to avoid cluttering the hyperparameters
# (Python defaults shown; when uncommenting a None value, write null so YAML parses it as None)
trainer:
  # accelerator: None
  # accumulate_grad_batches: 1
  # amp_backend: native
  # amp_level: O2
  # auto_lr_find: False
  # auto_scale_batch_size: False
  # auto_select_gpus: False
  benchmark: True
  # check_val_every_n_epoch: 1
  # checkpoint_callback: True
  # default_root_dir:
  # deterministic: False
  # fast_dev_run: False
  # flush_logs_every_n_steps: 100
  # gpus:
  # gradient_clip_val: 0
  # limit_predict_batches: 1.0
  # limit_test_batches: 1.0
  # limit_train_batches: 1.0
  # limit_val_batches: 1.0
  # log_every_n_steps: 50
  # log_gpu_memory: None
  # logger: True
  # max_epochs: None
  # max_steps: None
  # min_epochs: None
  # min_steps: None
  # move_metrics_to_cpu: False
  # multiple_trainloader_mode: max_size_cycle
  # num_nodes: 1
  # num_processes: 1
  # num_sanity_val_steps: 2
  # overfit_batches: 0.0
  # plugins: None
  # precision: 32
  # prepare_data_per_node: True
  # process_position: 0
  # profiler: None
  # progress_bar_refresh_rate: None
  # reload_dataloaders_every_epoch: False
  # replace_sampler_ddp: True
  # resume_from_checkpoint: None
  # stochastic_weight_avg: False
  # sync_batchnorm: False
  terminate_on_nan: True
  # track_grad_norm: -1
  # truncated_bptt_steps: None
  # val_check_interval: 1.0
  # weights_save_path: None
  # weights_summary: top
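
# A minimal sketch of how this config is typically consumed; the script layout,
# config path, and config name below are assumptions, not defined by this file:
#   import hydra
#   import pytorch_lightning as pl
#
#   @hydra.main(config_path="conf", config_name="defaults")
#   def main(cfg):
#       # The uncommented keys above (benchmark, terminate_on_nan) reach the Trainer here
#       trainer = pl.Trainer(**cfg.trainer)
#
#   if __name__ == "__main__":
#       main()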