Commit e8f97a1d authored by guptapriya's avatar guptapriya
Browse files

Change batch size and epochs for NCF benchmarks

The current batch size of 160000 does not converge to the desired HR, so we decrease it to 99k, which is known to converge. Tested locally and reached 63.5 at epoch 7. Also decreasing the number of epochs, as I see no improvement after epoch 7-8.
parent f06b5716
...@@ -81,9 +81,9 @@ class KerasNCFRealData(KerasNCFBenchmarkBase): ...@@ -81,9 +81,9 @@ class KerasNCFRealData(KerasNCFBenchmarkBase):
default_flags = {} default_flags = {}
default_flags['dataset'] = 'ml-20m' default_flags['dataset'] = 'ml-20m'
default_flags['num_gpus'] = 1 default_flags['num_gpus'] = 1
default_flags['train_epochs'] = 14 default_flags['train_epochs'] = 8
default_flags['clean'] = True default_flags['clean'] = True
default_flags['batch_size'] = 160000 default_flags['batch_size'] = 99000
default_flags['learning_rate'] = 0.00382059 default_flags['learning_rate'] = 0.00382059
default_flags['beta1'] = 0.783529 default_flags['beta1'] = 0.783529
default_flags['beta2'] = 0.909003 default_flags['beta2'] = 0.909003
...@@ -138,8 +138,8 @@ class KerasNCFSyntheticData(KerasNCFBenchmarkBase): ...@@ -138,8 +138,8 @@ class KerasNCFSyntheticData(KerasNCFBenchmarkBase):
default_flags = {} default_flags = {}
default_flags['dataset'] = 'ml-20m' default_flags['dataset'] = 'ml-20m'
default_flags['num_gpus'] = 1 default_flags['num_gpus'] = 1
default_flags['train_epochs'] = 14 default_flags['train_epochs'] = 8
default_flags['batch_size'] = 160000 default_flags['batch_size'] = 99000
default_flags['learning_rate'] = 0.00382059 default_flags['learning_rate'] = 0.00382059
default_flags['beta1'] = 0.783529 default_flags['beta1'] = 0.783529
default_flags['beta2'] = 0.909003 default_flags['beta2'] = 0.909003
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment