linting, moved env, updated contrib credit

Shannon Sands 2025-05-26 14:35:16 +10:00
parent 81d1ebeaef
commit bf12e7df15
83 changed files with 1560 additions and 640 deletions

@@ -0,0 +1,2 @@
Here you can find the configs that were used to create the baselines.
Make sure to adapt the paths to the data and the experiment output directories!
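
For example, a minimal sketch of such an adaptation, assuming you uncomment the data_dir and output_dir keys that appear (commented out) in the engine section of every config below; the absolute paths here are placeholders:

engine:
  data_dir: /path/to/data            # adapt: where the task datasets live
  output_dir: /path/to/experiments   # adapt: where run results and plots are written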

@@ -0,0 +1,26 @@
task:
  name: classification
optimizer:
  - name: adamw_baseline
    learning_rate: [1.e-2, 1.e-3, 1.e-4]
    weight_decay: [10, 1.e-0, 1.e-1, 1.e-2]
  - name: adamcpr_fast
    learning_rate: [1.e-2, 1.e-3, 1.e-4]
    kappa_init_param: [0.125, 0.25, 0.5, 1]
engine:
  seed: [1, 2, 3]
  # data_dir: ./data
  # output_dir: ./experiments
  plot: false
  silent: true
  sbatch_script_template: baselines/sbatch_template.sh # adapt the template to your needs
  run_scheduler: slurm_array
  sbatch_time_factor: 1.8 # increase this for slower machines
  sbatch_args:
    partition: single # adapt to your cluster
evaluation:
  output_types: [pdf]
  plot:
    x_axis:
      - optimizer.kappa_init_param
      - optimizer.weight_decay

@@ -0,0 +1,37 @@
task:
  name: classification_small
  output_dir_name: classification_small_cpr_paper
  label_smoothing: 0.1
  train_transforms:
    trivial_augment:
      use: false
optimizer:
  - name: adamw_baseline
    learning_rate: [1.e-1, 1.e-2, 1.e-3, 1.e-4]
    # learning_rate: [1.e-1, 3.16e-2, 1.e-2, 3.16e-3, 1.e-3] # finer grid
    weight_decay: [1, 1.e-1, 1.e-2, 1.e-3, 1.e-4, 0]
    warmup_factor: 0.025
    eta_min_factor: 0.1
  - name: adamcpr_fast
    learning_rate: [1.e-1, 1.e-2, 1.e-3, 1.e-4]
    # learning_rate: [1.e-1, 3.16e-2, 1.e-2, 3.16e-3, 1.e-3] # finer grid
    kappa_init_param: [0.5, 1, 2, 4, 8, 16, 32]
    warmup_factor: 0.025
    eta_min_factor: 0.1
engine:
  seed: [1, 2, 3]
  # data_dir: ./data
  # output_dir: ./experiments
  plot: false
  silent: true
  sbatch_script_template: baselines/sbatch_template.sh # adapt the template to your needs
  run_scheduler: slurm_array
  sbatch_time_factor: 1.8 # increase this for slower machines
  sbatch_args:
    partition: single # adapt to your cluster
evaluation:
  output_types: [pdf]
  plot:
    x_axis:
      - optimizer.kappa_init_param
      - optimizer.weight_decay

@@ -0,0 +1,28 @@
task:
  name: classification_small
optimizer:
  - name: adamw_baseline
    learning_rate: [1.e-1, 1.e-2, 1.e-3, 1.e-4]
    # learning_rate: [1.e-1, 3.16e-2, 1.e-2, 3.16e-3, 1.e-3] # finer grid
    weight_decay: [10, 1.e-0, 1.e-1, 1.e-2, 1.e-3]
  - name: adamcpr_fast
    learning_rate: [1.e-1, 1.e-2, 1.e-3, 1.e-4]
    # learning_rate: [1.e-1, 3.16e-2, 1.e-2, 3.16e-3, 1.e-3] # finer grid
    kappa_init_param: [1, 2, 4, 8, 16]
engine:
  seed: [1, 2, 3]
  # data_dir: ./data
  # output_dir: ./experiments
  plot: false
  silent: true
  sbatch_script_template: baselines/sbatch_template.sh # adapt the template to your needs
  run_scheduler: slurm_array
  sbatch_time_factor: 1.8 # increase this for slower machines
  sbatch_args:
    partition: single # adapt to your cluster
evaluation:
  output_types: [pdf]
  plot:
    x_axis:
      - optimizer.kappa_init_param
      - optimizer.weight_decay

@@ -0,0 +1,26 @@
task:
  name: graph
optimizer:
  - name: adamw_baseline
    learning_rate: [1.e-2, 1.e-3, 1.e-4]
    weight_decay: [1.e-1, 1.e-2, 1.e-3, 0]
  - name: adamcpr_fast
    learning_rate: [1.e-2, 1.e-3, 1.e-4]
    kappa_init_param: [0.5, 1, 2, 4]
engine:
  seed: [1, 2, 3]
  # data_dir: ./data
  # output_dir: ./experiments
  plot: false
  silent: true
  sbatch_script_template: baselines/sbatch_template.sh # adapt the template to your needs
  run_scheduler: slurm_array
  sbatch_time_factor: 1.8 # increase this for slower machines
  sbatch_args:
    partition: single # adapt to your cluster
evaluation:
  output_types: [pdf]
  plot:
    x_axis:
      - optimizer.kappa_init_param
      - optimizer.weight_decay

@@ -0,0 +1,26 @@
task:
  name: graph_tiny
optimizer:
  - name: adamw_baseline
    learning_rate: [1.e-0, 1.e-1, 1.e-2, 1.e-3]
    weight_decay: [1, 1.e-1, 1.e-2, 1.e-3, 1.e-4, 0]
  - name: adamcpr_fast
    learning_rate: [1.e-0, 1.e-1, 1.e-2, 1.e-3]
    kappa_init_param: [0.5, 1, 2, 4, 8, 16, 32]
engine:
  seed: [1, 2, 3]
  # data_dir: ./data
  # output_dir: ./experiments
  plot: false
  silent: true
  sbatch_script_template: baselines/sbatch_template.sh # adapt the template to your needs
  run_scheduler: slurm_array
  sbatch_time_factor: 1 # increase this for slower machines
  sbatch_args:
    partition: single # adapt to your cluster
evaluation:
  output_types: [pdf]
  plot:
    x_axis:
      - optimizer.kappa_init_param
      - optimizer.weight_decay

@@ -0,0 +1,28 @@
task:
  name: mnist
optimizer:
  - name: adamw_baseline
    learning_rate: [1.e-1, 1.e-2, 1.e-3]
    # learning_rate: [1.e-1, 3.16e-2, 1.e-2, 3.16e-3, 1.e-3] # finer grid
    weight_decay: [1.e-0, 1.e-1, 1.e-2, 1.e-3]
  - name: adamcpr_fast
    learning_rate: [1.e-1, 1.e-2, 1.e-3]
    # learning_rate: [1.e-1, 3.16e-2, 1.e-2, 3.16e-3, 1.e-3] # finer grid
    kappa_init_param: [0.5, 1, 2, 4, 8, 16, 32]
engine:
  seed: [1, 2, 3]
  # data_dir: ./data
  # output_dir: ./experiments
  plot: false
  silent: true
  sbatch_script_template: baselines/sbatch_template.sh # adapt the template to your needs
  run_scheduler: slurm_array
  sbatch_time_factor: 1.8 # increase this for slower machines
  sbatch_args:
    partition: single # adapt to your cluster
evaluation:
  output_types: [pdf]
  plot:
    x_axis:
      - optimizer.kappa_init_param
      - optimizer.weight_decay

@@ -0,0 +1,23 @@
#!/bin/bash
module load devel/miniconda
nvidia-smi
source ~/.bashrc
# some users reported issues with stacked conda environments; see https://en.wikipedia.org/wiki/Rule_of_three_(writing)
conda deactivate
conda deactivate
conda deactivate
conda activate fob
# Running the job (__FOB_COMMAND__ is a template placeholder, substituted with the actual run command)
start=$(date +%s)
__FOB_COMMAND__
finish=$(date +%s)
runtime=$((finish-start))
echo "Job execution complete."
echo "Total job runtime: $runtime seconds"
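
For reference, this template is wired in through the engine section of the configs in this directory; save_sbatch_scripts (used only in the segmentation config below) additionally keeps the generated scripts on disk:

engine:
  run_scheduler: slurm_array
  sbatch_script_template: baselines/sbatch_template.sh
  save_sbatch_scripts: slurm-scripts # optional: keep the generated sbatch scripts for inspection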

@@ -0,0 +1,27 @@
task:
  name: segmentation
optimizer:
  - name: adamw_baseline
    learning_rate: [3.16e-3, 1.e-3, 3.16e-4]
    weight_decay: [1.e-1, 1.e-2, 1.e-3, 0]
  - name: adamcpr_fast
    learning_rate: [3.16e-3, 1.e-3, 3.16e-4]
    kappa_init_param: [1, 4, 16, 64]
engine:
  seed: [1, 2, 3]
  # data_dir: ./data
  # output_dir: ./experiments
  plot: false
  silent: true
  sbatch_script_template: baselines/sbatch_template.sh # adapt the template to your needs
  run_scheduler: slurm_array
  sbatch_time_factor: 1.5 # increase this for slower machines
  sbatch_args:
    partition: single # adapt to your cluster
  save_sbatch_scripts: slurm-scripts
evaluation:
  output_types: [pdf]
  plot:
    x_axis:
      - optimizer.kappa_init_param
      - optimizer.weight_decay

@@ -0,0 +1,26 @@
task:
  name: tabular
optimizer:
  - name: adamw_baseline
    learning_rate: [1.e-2, 1.e-3, 1.e-4]
    weight_decay: [10, 1.e-0, 1.e-1, 1.e-2, 1.e-3]
  - name: adamcpr_fast
    learning_rate: [1.e-2, 1.e-3, 1.e-4]
    kappa_init_param: [0.5, 1, 2, 4, 8]
engine:
  seed: [1, 2, 3]
  # data_dir: ./data
  # output_dir: ./experiments
  plot: false
  silent: true
  sbatch_script_template: baselines/sbatch_template.sh # adapt the template to your needs
  run_scheduler: slurm_array
  sbatch_time_factor: 1.8 # increase this for slower machines
  sbatch_args:
    partition: single # adapt to your cluster
evaluation:
  output_types: [pdf]
  plot:
    x_axis:
      - optimizer.kappa_init_param
      - optimizer.weight_decay

@@ -0,0 +1,28 @@
task:
  name: translation
optimizer:
  - name: adamw_baseline
    learning_rate: [3.16e-3, 1.e-3, 3.16e-4]
    # learning_rate: [3.16e-3, 1.77e-3, 1.e-3, 5.62e-4, 3.16e-4] # finer grid
    weight_decay: [1.e-0, 1.e-1, 1.e-2]
  - name: adamcpr_fast
    learning_rate: [3.16e-3, 1.e-3, 3.16e-4]
    # learning_rate: [3.16e-3, 1.77e-3, 1.e-3, 5.62e-4, 3.16e-4] # finer grid
    kappa_init_param: [0.5, 1, 2]
engine:
  seed: [1, 2, 3]
  # data_dir: ./data
  # output_dir: ./experiments
  plot: false
  silent: true
  sbatch_script_template: baselines/sbatch_template.sh # adapt the template to your needs
  run_scheduler: slurm_array
  sbatch_time_factor: 2.0 # increase this for slower machines
  sbatch_args:
    partition: single # adapt to your cluster
evaluation:
  output_types: [pdf]
  plot:
    x_axis:
      - optimizer.kappa_init_param
      - optimizer.weight_decay