huaerkl / fairseq-data2vec_pytorch · Commits

Commit 72f5785f, authored Aug 15, 2023 by huaerkl

    v1.0

Pipeline #505: canceled. Changes: 508. Pipelines: 1.
Showing 20 changed files on this page, with 934 additions and 0 deletions.
Changed files (all new; added line counts in parentheses):

examples/data2vec/config/v2/text_finetuning/mrpc.yaml (+60)
examples/data2vec/config/v2/text_finetuning/qnli.yaml (+59)
examples/data2vec/config/v2/text_finetuning/qqp.yaml (+60)
examples/data2vec/config/v2/text_finetuning/rte.yaml (+59)
examples/data2vec/config/v2/text_finetuning/run_config/local.yaml (+15)
examples/data2vec/config/v2/text_finetuning/sst_2.yaml (+59)
examples/data2vec/config/v2/text_finetuning/sts_b.yaml (+61)
examples/data2vec/config/vision/finetuning/imagenet.yaml (+52)
examples/data2vec/config/vision/finetuning/mae_imagenet_clean.yaml (+65)
examples/data2vec/config/vision/finetuning/mae_imagenet_huge_clean.yaml (+68)
examples/data2vec/config/vision/finetuning/mae_imagenet_large_clean.yaml (+68)
examples/data2vec/config/vision/finetuning/run_config/local.yaml (+15)
examples/data2vec/config/vision/finetuning/run_config/slurm_1.yaml (+37)
examples/data2vec/config/vision/finetuning/run_config/slurm_1_aws.yaml (+36)
examples/data2vec/config/vision/finetuning/run_config/slurm_2.yaml (+38)
examples/data2vec/config/vision/finetuning/run_config/slurm_2_aws.yaml (+38)
examples/data2vec/config/vision/finetuning/run_config/slurm_3.yaml (+36)
examples/data2vec/config/vision/finetuning/run_config/slurm_4.yaml (+36)
examples/data2vec/config/vision/finetuning/run_config/slurm_4_aws.yaml (+36)
examples/data2vec/config/vision/finetuning/run_config/slurm_6_aws.yaml (+36)
examples/data2vec/config/v2/text_finetuning/mrpc.yaml (new file, mode 100644)

# @package _group_

common:
  fp16: true
  fp16_init_scale: 4
  threshold_loss_scale: 1
  fp16_scale_window: 128
  log_format: json
  log_interval: 200
  user_dir: ${env:PWD}/examples/data2vec

task:
  _name: sentence_prediction
  data: ???
  init_token: 0
  separator_token: 2
  num_classes: 2
  max_positions: 512
  d2v2_multi: True

checkpoint:
  best_checkpoint_metric: acc_and_f1
  maximize_best_checkpoint_metric: true
  no_epoch_checkpoints: true

distributed_training:
  find_unused_parameters: true
  distributed_world_size: 1
  nprocs_per_node: 1
  distributed_port: -1

criterion:
  _name: sentence_prediction
  report_acc_and_f1: True

dataset:
  batch_size: 16
  required_batch_size_multiple: 1
  max_tokens: 4400
  num_workers: 1

optimizer:
  _name: adam
  weight_decay: 0.1
  adam_betas: (0.9,0.98)
  adam_eps: 1e-06

lr_scheduler:
  _name: polynomial_decay
  warmup_updates: 137

optimization:
  clip_norm: 0.0
  lr: [2e-05]
  max_update: 2296
  max_epoch: 10

model:
  _name: data2vec_text_classification
  model_path: ???
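In these configs, ??? is OmegaConf's mandatory-missing marker: task.data (the binarized task data) and model.model_path (the pretrained checkpoint) must be supplied at launch time. A minimal sketch of loading the file and filling the placeholders programmatically, assuming omegaconf is installed; both paths below are hypothetical:

import os
from omegaconf import OmegaConf

# Parse the YAML (the "# @package _group_" directive is just a comment here).
cfg = OmegaConf.load("examples/data2vec/config/v2/text_finetuning/mrpc.yaml")

# Supply the two mandatory ??? fields; these paths are placeholders.
overrides = OmegaConf.from_dotlist([
    "task.data=/path/to/MRPC-bin",
    "model.model_path=/path/to/data2vec_text.pt",
])
cfg = OmegaConf.merge(cfg, overrides)

assert not OmegaConf.is_missing(cfg.task, "data")
print(cfg.optimization.max_update)  # 2296

In practice the same overrides are passed on the command line to fairseq's Hydra entry point; the sketch above only shows how the placeholders behave.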
examples/data2vec/config/v2/text_finetuning/qnli.yaml (new file, mode 100644)

# @package _group_

common:
  fp16: true
  fp16_init_scale: 4
  threshold_loss_scale: 1
  fp16_scale_window: 128
  log_format: json
  log_interval: 200
  user_dir: ${env:PWD}/examples/data2vec

task:
  _name: sentence_prediction
  data: ???
  init_token: 0
  separator_token: 2
  num_classes: 2
  max_positions: 512
  d2v2_multi: True

checkpoint:
  best_checkpoint_metric: accuracy
  maximize_best_checkpoint_metric: true
  no_epoch_checkpoints: true

distributed_training:
  find_unused_parameters: true
  distributed_world_size: 1
  nprocs_per_node: 1
  distributed_port: -1

criterion:
  _name: sentence_prediction

dataset:
  batch_size: 32
  required_batch_size_multiple: 1
  max_tokens: 4400
  num_workers: 1

optimizer:
  _name: adam
  weight_decay: 0.1
  adam_betas: (0.9,0.98)
  adam_eps: 1e-06

lr_scheduler:
  _name: polynomial_decay
  warmup_updates: 1986

optimization:
  clip_norm: 0.0
  lr: [2e-05]
  max_update: 33112
  max_epoch: 10

model:
  _name: data2vec_text_classification
  model_path: ???
examples/data2vec/config/v2/text_finetuning/qqp.yaml (new file, mode 100644)

# @package _group_

common:
  fp16: true
  fp16_init_scale: 4
  threshold_loss_scale: 1
  fp16_scale_window: 128
  log_format: json
  log_interval: 200
  user_dir: ${env:PWD}/examples/data2vec

task:
  _name: sentence_prediction
  data: ???
  init_token: 0
  separator_token: 2
  num_classes: 2
  max_positions: 512
  d2v2_multi: True

checkpoint:
  best_checkpoint_metric: acc_and_f1
  maximize_best_checkpoint_metric: true
  no_epoch_checkpoints: true

distributed_training:
  find_unused_parameters: true
  distributed_world_size: 1
  nprocs_per_node: 1
  distributed_port: -1

criterion:
  _name: sentence_prediction
  report_acc_and_f1: True

dataset:
  batch_size: 32
  required_batch_size_multiple: 1
  max_tokens: 4400
  num_workers: 1

optimizer:
  _name: adam
  weight_decay: 0.1
  adam_betas: (0.9,0.98)
  adam_eps: 1e-06

lr_scheduler:
  _name: polynomial_decay
  warmup_updates: 28318

optimization:
  clip_norm: 0.0
  lr: [2e-05]
  max_update: 113272
  max_epoch: 10

model:
  _name: data2vec_text_classification
  model_path: ???
examples/data2vec/config/v2/text_finetuning/rte.yaml (new file, mode 100644)

# @package _group_

common:
  fp16: true
  fp16_init_scale: 4
  threshold_loss_scale: 1
  fp16_scale_window: 128
  log_format: json
  log_interval: 200
  user_dir: ${env:PWD}/examples/data2vec

task:
  _name: sentence_prediction
  data: ???
  init_token: 0
  separator_token: 2
  num_classes: 2
  max_positions: 512
  d2v2_multi: True

checkpoint:
  best_checkpoint_metric: accuracy
  maximize_best_checkpoint_metric: true
  no_epoch_checkpoints: true

distributed_training:
  find_unused_parameters: true
  distributed_world_size: 1
  nprocs_per_node: 1
  distributed_port: -1

criterion:
  _name: sentence_prediction

dataset:
  batch_size: 16
  required_batch_size_multiple: 1
  max_tokens: 4400
  num_workers: 1

optimizer:
  _name: adam
  weight_decay: 0.1
  adam_betas: (0.9,0.98)
  adam_eps: 1e-06

lr_scheduler:
  _name: polynomial_decay
  warmup_updates: 122

optimization:
  clip_norm: 0.0
  lr: [2e-05]
  max_update: 2036
  max_epoch: 10

model:
  _name: data2vec_text_classification
  model_path: ???
examples/data2vec/config/v2/text_finetuning/run_config/local.yaml (new file, mode 100644)

# @package _global_

hydra:
  sweep:
    dir: ${env:PWD}/tmp_dbg/${now:%H-%M-%S}

distributed_training:
  distributed_world_size: 1
  nprocs_per_node: 1
  distributed_port: -1

common:
  log_interval: 1

dataset:
  num_workers: 0
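Unlike the task configs above, which are packaged under _group_, this run config is packaged under _global_, so Hydra merges its keys directly over the top level of whatever task config it is composed with. A rough sketch of the effect using a plain OmegaConf merge, which approximates (but is not) Hydra's full composition logic:

from omegaconf import OmegaConf

# local.yaml's keys win over the corresponding keys of the task config.
base = OmegaConf.load("examples/data2vec/config/v2/text_finetuning/mrpc.yaml")
local = OmegaConf.load(
    "examples/data2vec/config/v2/text_finetuning/run_config/local.yaml"
)
merged = OmegaConf.merge(base, local)

print(merged.common.log_interval)  # 1 (was 200 in mrpc.yaml)
print(merged.dataset.num_workers)  # 0 (was 1 in mrpc.yaml)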
examples/data2vec/config/v2/text_finetuning/sst_2.yaml (new file, mode 100644)

# @package _group_

common:
  fp16: true
  fp16_init_scale: 4
  threshold_loss_scale: 1
  fp16_scale_window: 128
  log_format: json
  log_interval: 200
  user_dir: ${env:PWD}/examples/data2vec

task:
  _name: sentence_prediction
  data: ???
  init_token: 0
  separator_token: 2
  num_classes: 2
  max_positions: 512
  d2v2_multi: True

checkpoint:
  best_checkpoint_metric: accuracy
  maximize_best_checkpoint_metric: true
  no_epoch_checkpoints: true

distributed_training:
  find_unused_parameters: true
  distributed_world_size: 1
  nprocs_per_node: 1
  distributed_port: -1

criterion:
  _name: sentence_prediction

dataset:
  batch_size: 32
  required_batch_size_multiple: 1
  max_tokens: 4400
  num_workers: 1

optimizer:
  _name: adam
  weight_decay: 0.1
  adam_betas: (0.9,0.98)
  adam_eps: 1e-06

lr_scheduler:
  _name: polynomial_decay
  warmup_updates: 1256

optimization:
  clip_norm: 0.0
  lr: [2e-05]
  max_update: 20935
  max_epoch: 10

model:
  _name: data2vec_text_classification
  model_path: ???
examples/data2vec/config/v2/text_finetuning/sts_b.yaml (new file, mode 100644)

# @package _group_

common:
  fp16: true
  fp16_init_scale: 4
  threshold_loss_scale: 1
  fp16_scale_window: 128
  log_format: json
  log_interval: 200
  user_dir: ${env:PWD}/examples/data2vec

task:
  _name: sentence_prediction
  data: ???
  init_token: 0
  separator_token: 2
  num_classes: 1
  max_positions: 512
  d2v2_multi: True

checkpoint:
  best_checkpoint_metric: pearson_and_spearman
  maximize_best_checkpoint_metric: true
  no_epoch_checkpoints: true

distributed_training:
  find_unused_parameters: true
  distributed_world_size: 1
  nprocs_per_node: 1
  distributed_port: -1

criterion:
  _name: sentence_prediction
  regression_target: true
  report_pearson_and_spearman: True

dataset:
  batch_size: 16
  required_batch_size_multiple: 1
  max_tokens: 4400
  num_workers: 1

optimizer:
  _name: adam
  weight_decay: 0.1
  adam_betas: (0.9,0.98)
  adam_eps: 1e-06

lr_scheduler:
  _name: polynomial_decay
  warmup_updates: 214

optimization:
  clip_norm: 0.0
  lr: [4e-05]
  max_update: 3598
  max_epoch: 10

model:
  _name: data2vec_text_classification
  model_path: ???
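Across these GLUE configs, warmup_updates tracks max_update: most tasks warm up for roughly 6% of training, with QQP as the outlier at 25%. A quick check of the ratios, using the values from the files above:

# Warmup fraction per task, computed from the configs above.
configs = {
    "mrpc": (137, 2296),
    "qnli": (1986, 33112),
    "qqp": (28318, 113272),
    "rte": (122, 2036),
    "sst_2": (1256, 20935),
    "sts_b": (214, 3598),
}
for task, (warmup, max_update) in configs.items():
    print(f"{task}: {warmup / max_update:.1%}")
# mrpc 6.0%, qnli 6.0%, qqp 25.0%, rte 6.0%, sst_2 6.0%, sts_b 5.9%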
examples/data2vec/config/vision/finetuning/imagenet.yaml (new file, mode 100644)

# @package _group_

common:
  fp16: true
  log_format: json
  log_interval: 200
  tensorboard_logdir: tb

checkpoint:
  save_interval: 1
  save_interval_updates: 25000
  keep_interval_updates: 1
  no_epoch_checkpoints: true
  best_checkpoint_metric: accuracy

task:
  _name: image_classification
  data: /datasets01/imagenet_full_size/061417

dataset:
  num_workers: 6
  batch_size: 64
  skip_invalid_size_inputs_valid_test: true
  required_batch_size_multiple: 1
  valid_subset: val

distributed_training:
  distributed_world_size: 8
  ddp_backend: c10d

criterion:
  _name: model
  log_keys:
    - correct

optimization:
  max_update: 100000
  lr: [0.0005]

optimizer:
  _name: adam
  adam_betas: (0.9,0.98)
  adam_eps: 1e-06
  weight_decay: 0.01

lr_scheduler:
  _name: cosine
  warmup_updates: 10000

model:
  _name: data2vec_image_classification
  model_path: ???
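Here batch_size is per GPU, so it combines with distributed_world_size to give the effective global batch: 64 × 8 = 512 images per update for this config. As a one-line sanity check:

# Effective global batch size for imagenet.yaml: per-GPU batch × world size.
batch_size, world_size = 64, 8
print(batch_size * world_size)  # 512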
examples/data2vec/config/vision/finetuning/mae_imagenet_clean.yaml (new file, mode 100644)

# @package _group_

common:
  fp16: true
  log_format: json
  log_interval: 200
  tensorboard_logdir: tb
  fp16_no_flatten_grads: true

checkpoint:
  save_interval: 1
  save_interval_updates: 25000
  keep_interval_updates: 1
  no_epoch_checkpoints: true
  best_checkpoint_metric: accuracy
  maximize_best_checkpoint_metric: true

task:
  _name: mae_image_classification
  data: /datasets01/imagenet_full_size/061417

dataset:
  num_workers: 6
  batch_size: 32
  skip_invalid_size_inputs_valid_test: true
  required_batch_size_multiple: 2
  valid_subset: val

distributed_training:
  distributed_world_size: 16
  ddp_backend: c10d

criterion:
  _name: model
  log_keys:
    - correct

optimization:
  max_update: 250200
  lr: [0.001]

optimizer:
  _name: composite
  dynamic_groups: true
  groups:
    default:
      lr_float: 0.001
      optimizer:
        _name: adam
        adam_betas: [0.9, 0.95]
        weight_decay: 0.05
      lr_scheduler:
        _name: cosine
        warmup_updates: 16000
        min_lr: 1e-6

lr_scheduler: pass_through

model:
  _name: mae_image_classification
  mixup: 0.7
  mixup_prob: 0.9
  model_path: ???
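The top-level "lr_scheduler: pass_through" defers scheduling to the composite optimizer's per-group scheduler; the single default group uses cosine decay after a linear warmup. A sketch of that schedule shape, assuming the standard linear-warmup-then-cosine rule (not fairseq's exact implementation):

import math

def lr_at(step, base_lr=0.001, warmup=16000, max_update=250200, min_lr=1e-6):
    # Linear warmup to base_lr, then cosine decay down to min_lr.
    if step < warmup:
        return base_lr * step / warmup
    t = (step - warmup) / (max_update - warmup)
    return min_lr + 0.5 * (base_lr - min_lr) * (1.0 + math.cos(math.pi * t))

print(lr_at(8000))    # 0.0005 (halfway through warmup)
print(lr_at(250200))  # ~1e-06 (fully decayed)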
examples/data2vec/config/vision/finetuning/mae_imagenet_huge_clean.yaml (new file, mode 100644)

# @package _group_

common:
  fp16: true
  log_format: json
  log_interval: 200
  tensorboard_logdir: tb
  fp16_no_flatten_grads: true

checkpoint:
  save_interval: 1
  save_interval_updates: 25000
  keep_interval_updates: 1
  no_epoch_checkpoints: true
  best_checkpoint_metric: accuracy
  maximize_best_checkpoint_metric: true

task:
  _name: mae_image_classification
  data: /datasets01/imagenet_full_size/061417

dataset:
  num_workers: 6
  batch_size: 32
  skip_invalid_size_inputs_valid_test: true
  required_batch_size_multiple: 2
  valid_subset: val

distributed_training:
  distributed_world_size: 16
  ddp_backend: c10d

criterion:
  _name: model
  log_keys:
    - correct

optimization:
  max_update: 125200
  lr: [0.0005]
  clip_norm: 4

optimizer:
  _name: composite
  dynamic_groups: true
  groups:
    default:
      lr_float: 0.0005
      optimizer:
        _name: adam
        adam_betas: [0.9, 0.95]
        weight_decay: 0.05
      lr_scheduler:
        _name: cosine
        warmup_updates: 16000
        min_lr: 1e-20

lr_scheduler: pass_through

model:
  _name: mae_image_classification
  mixup: 0.7
  mixup_prob: 0.9
  layer_decay: 0.75
  drop_path_rate: 0.2
  model_path: ???
examples/data2vec/config/vision/finetuning/mae_imagenet_large_clean.yaml (new file, mode 100644)

# @package _group_

common:
  fp16: true
  log_format: json
  log_interval: 200
  tensorboard_logdir: tb
  fp16_no_flatten_grads: true

checkpoint:
  save_interval: 1
  save_interval_updates: 25000
  keep_interval_updates: 1
  no_epoch_checkpoints: true
  best_checkpoint_metric: accuracy
  maximize_best_checkpoint_metric: true

task:
  _name: mae_image_classification
  data: /datasets01/imagenet_full_size/061417

dataset:
  num_workers: 6
  batch_size: 32
  skip_invalid_size_inputs_valid_test: true
  required_batch_size_multiple: 2
  valid_subset: val

distributed_training:
  distributed_world_size: 16
  ddp_backend: c10d

criterion:
  _name: model
  log_keys:
    - correct

optimization:
  max_update: 125200
  lr: [0.0005]
  clip_norm: 4

optimizer:
  _name: composite
  dynamic_groups: true
  groups:
    default:
      lr_float: 0.0005
      optimizer:
        _name: adam
        adam_betas: [0.9, 0.95]
        weight_decay: 0.05
      lr_scheduler:
        _name: cosine
        warmup_updates: 16000
        min_lr: 1e-7

lr_scheduler: pass_through

model:
  _name: mae_image_classification
  mixup: 0.7
  mixup_prob: 0.9
  layer_decay: 0.75
  drop_path_rate: 0.2
  model_path: ???
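The huge and large variants add layer_decay: 0.75, i.e. layer-wise learning-rate decay: blocks nearest the output keep the full learning rate while earlier blocks are scaled down geometrically. A sketch of the usual rule (the BEiT/MAE-style convention; fairseq's exact layer indexing may differ):

def layer_scale(layer_id, num_layers, decay=0.75):
    # Earlier layers (small layer_id) get smaller LR multipliers.
    return decay ** (num_layers - layer_id)

# For a hypothetical 24-block ViT-L: the last block trains at the full LR,
# the first at roughly 0.75**24, about 0.001 of it.
print(layer_scale(24, 24))  # 1.0
print(layer_scale(0, 24))   # ~0.001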
examples/data2vec/config/vision/finetuning/run_config/local.yaml (new file, mode 100644)

# @package _global_

hydra:
  sweep:
    dir: ${env:PWD}/tmp_dbg/${now:%H-%M-%S}

distributed_training:
  distributed_world_size: 1
  nprocs_per_node: 1
  distributed_port: -1

common:
  log_interval: 1

dataset:
  num_workers: 0
examples/data2vec/config/vision/finetuning/run_config/slurm_1.yaml (new file, mode 100644)

# @package _global_

hydra:
  job:
    config:
      override_dirname:
        kv_sep: ':'
        item_sep: '/'
        exclude_keys:
          - run_config
          - distributed_training.distributed_port
          - distributed_training.distributed_world_size
          - model.pretrained_model_path
          - model.target_network_path
          - next_script
          - task.cache_in_scratch
          - task.data
          - checkpoint.save_interval_updates
          - checkpoint.keep_interval_updates
          - checkpoint.save_on_overflow
          - common.log_interval
          - common.user_dir
  sweep:
    dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname}
    subdir: ''
  launcher:
    submitit_folder: ${hydra.sweep.dir}
    timeout_min: 4320
    cpus_per_task: 80
    gpus_per_node: 8
    tasks_per_node: 1
    mem_gb: 450
    nodes: 1
    name: ${env:PREFIX}_${hydra.job.config_name}
    partition: devlab,learnlab,learnfair,scavenge
    constraint: volta32gb,ib4
    max_num_timeout: 30
examples/data2vec/config/vision/finetuning/run_config/slurm_1_aws.yaml (new file, mode 100644)

# @package _global_

hydra:
  job:
    config:
      override_dirname:
        kv_sep: ':'
        item_sep: '/'
        exclude_keys:
          - run_config
          - distributed_training.distributed_port
          - distributed_training.distributed_world_size
          - model.pretrained_model_path
          - model.target_network_path
          - next_script
          - task.cache_in_scratch
          - task.data
          - checkpoint.save_interval_updates
          - checkpoint.keep_interval_updates
          - checkpoint.save_on_overflow
          - common.log_interval
          - common.user_dir
  sweep:
    dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname}
    subdir: ''
  launcher:
    submitit_folder: ${hydra.sweep.dir}
    timeout_min: 4320
    cpus_per_task: 80
    gpus_per_node: 8
    tasks_per_node: 1
    mem_gb: 0
    nodes: 1
    name: ${env:PREFIX}_${hydra.job.config_name}
    partition: wav2vec,learnlab,learnfair
    max_num_timeout: 30
examples/data2vec/config/vision/finetuning/run_config/slurm_2.yaml (new file, mode 100644)

# @package _global_

hydra:
  job:
    config:
      override_dirname:
        kv_sep: ':'
        item_sep: '/'
        exclude_keys:
          - run_config
          - distributed_training.distributed_port
          - distributed_training.distributed_world_size
          - model.pretrained_model_path
          - model.target_network_path
          - next_script
          - task.cache_in_scratch
          - task.data
          - checkpoint.save_interval_updates
          - checkpoint.keep_interval_updates
          - checkpoint.save_on_overflow
          - common.log_interval
          - common.user_dir
          - task.local_cache_path
  sweep:
    dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname}
    subdir: ''
  launcher:
    submitit_folder: ${hydra.sweep.dir}
    timeout_min: 4320
    cpus_per_task: 10
    gpus_per_node: 8
    tasks_per_node: 8
    mem_gb: 450
    nodes: 2
    name: ${env:PREFIX}_${hydra.job.config_name}
    partition: devlab,learnlab,learnfair,scavenge
    constraint: volta32gb,ib4
    max_num_timeout: 30
examples/data2vec/config/vision/finetuning/run_config/slurm_2_aws.yaml (new file, mode 100644)

# @package _global_

hydra:
  job:
    config:
      override_dirname:
        kv_sep: ':'
        item_sep: '/'
        exclude_keys:
          - run_config
          - distributed_training.distributed_port
          - distributed_training.distributed_world_size
          - model.pretrained_model_path
          - model.target_network_path
          - next_script
          - task.cache_in_scratch
          - task.data
          - checkpoint.save_interval_updates
          - checkpoint.keep_interval_updates
          - checkpoint.save_on_overflow
          - common.log_interval
          - common.user_dir
          - task.local_cache_path
          - model.model_path
  sweep:
    dir: /fsx-wav2vec/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname}
    subdir: ''
  launcher:
    submitit_folder: ${hydra.sweep.dir}
    timeout_min: 4320
    cpus_per_task: 10
    gpus_per_node: 8
    tasks_per_node: 8
    mem_gb: 0
    nodes: 2
    name: ${env:PREFIX}_${hydra.job.config_name}
    partition: wav2vec,learnlab,learnfair
    max_num_timeout: 30
examples/data2vec/config/vision/finetuning/run_config/slurm_3.yaml (new file, mode 100644)

# @package _global_

hydra:
  job:
    config:
      override_dirname:
        kv_sep: ':'
        item_sep: '/'
        exclude_keys:
          - run_config
          - distributed_training.distributed_port
          - distributed_training.distributed_world_size
          - model.pretrained_model_path
          - model.target_network_path
          - next_script
          - task.cache_in_scratch
          - task.data
          - checkpoint.save_interval_updates
          - checkpoint.keep_interval_updates
          - checkpoint.save_on_overflow
          - common.log_interval
  sweep:
    dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname}
    subdir: ''
  launcher:
    submitit_folder: ${hydra.sweep.dir}
    timeout_min: 4320
    cpus_per_task: 80
    gpus_per_node: 8
    tasks_per_node: 1
    mem_gb: 450
    nodes: 3
    name: ${env:PREFIX}_${hydra.job.config_name}
    partition: devlab,learnlab,learnfair,scavenge
    constraint: volta32gb,ib4
    max_num_timeout: 30
examples/data2vec/config/vision/finetuning/run_config/slurm_4.yaml (new file, mode 100644)

# @package _global_

hydra:
  job:
    config:
      override_dirname:
        kv_sep: ':'
        item_sep: '/'
        exclude_keys:
          - run_config
          - distributed_training.distributed_port
          - distributed_training.distributed_world_size
          - model.pretrained_model_path
          - model.target_network_path
          - next_script
          - task.cache_in_scratch
          - task.data
          - checkpoint.save_interval_updates
          - checkpoint.keep_interval_updates
          - checkpoint.save_on_overflow
          - common.log_interval
  sweep:
    dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname}
    subdir: ''
  launcher:
    submitit_folder: ${hydra.sweep.dir}
    timeout_min: 4320
    cpus_per_task: 10
    gpus_per_node: 8
    tasks_per_node: 8
    mem_gb: 450
    nodes: 4
    name: ${env:PREFIX}_${hydra.job.config_name}
    partition: devlab,learnlab,learnfair,scavenge
    constraint: volta32gb,ib4
    max_num_timeout: 30
examples/data2vec/config/vision/finetuning/run_config/slurm_4_aws.yaml (new file, mode 100644)

# @package _global_

hydra:
  job:
    config:
      override_dirname:
        kv_sep: ':'
        item_sep: '/'
        exclude_keys:
          - run_config
          - distributed_training.distributed_port
          - distributed_training.distributed_world_size
          - model.pretrained_model_path
          - model.target_network_path
          - next_script
          - task.cache_in_scratch
          - task.data
          - checkpoint.save_interval_updates
          - checkpoint.keep_interval_updates
          - checkpoint.save_on_overflow
          - common.log_interval
          - common.user_dir
  sweep:
    dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname}
    subdir: ''
  launcher:
    submitit_folder: ${hydra.sweep.dir}
    timeout_min: 4320
    cpus_per_task: 10
    gpus_per_node: 8
    tasks_per_node: 8
    mem_gb: 0
    nodes: 4
    name: ${env:PREFIX}_${hydra.job.config_name}
    partition: wav2vec,learnlab,learnfair
    max_num_timeout: 30
examples/data2vec/config/vision/finetuning/run_config/slurm_6_aws.yaml (new file, mode 100644)

# @package _global_

hydra:
  job:
    config:
      override_dirname:
        kv_sep: ':'
        item_sep: '/'
        exclude_keys:
          - run_config
          - distributed_training.distributed_port
          - distributed_training.distributed_world_size
          - model.pretrained_model_path
          - model.target_network_path
          - next_script
          - task.cache_in_scratch
          - task.data
          - checkpoint.save_interval_updates
          - checkpoint.keep_interval_updates
          - checkpoint.save_on_overflow
          - common.log_interval
          - common.user_dir
  sweep:
    dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}_${hydra.launcher.gpus_per_node}/${hydra.job.override_dirname}
    subdir: ''
  launcher:
    submitit_folder: ${hydra.sweep.dir}
    timeout_min: 4320
    cpus_per_task: 10
    gpus_per_node: 8
    tasks_per_node: 8
    mem_gb: 0
    nodes: 6
    name: ${env:PREFIX}_${hydra.job.config_name}
    partition: wav2vec,learnlab,learnfair
    max_num_timeout: 30
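The slurm_* launcher variants differ mainly in node count, per-node task layout, and whether a memory limit is set: the _aws files use mem_gb: 0 (no explicit limit) and drop the volta32gb,ib4 constraint. The total GPU budget per variant follows directly from the values above:

# Total GPUs requested by each launcher variant (nodes, gpus_per_node
# taken from the configs above).
variants = {
    "slurm_1": (1, 8), "slurm_1_aws": (1, 8),
    "slurm_2": (2, 8), "slurm_2_aws": (2, 8),
    "slurm_3": (3, 8),
    "slurm_4": (4, 8), "slurm_4_aws": (4, 8),
    "slurm_6_aws": (6, 8),
}
for name, (nodes, gpus_per_node) in variants.items():
    print(f"{name}: {nodes * gpus_per_node} GPUs")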