Commit dfb907fe authored by Neel Kant
Browse files

Correct indexer seq length

parent ab5c4f92
@@ -252,7 +252,7 @@ def get_ict_dataset(use_titles=True):
         data_prefix=args.data_path,
         num_epochs=1,
         max_num_samples=None,
-        max_seq_length=288,  # doesn't matter
+        max_seq_length=args.seq_length,
         short_seq_prob=0.0001,  # doesn't matter
         seed=1,
         query_in_block_prob=1,
...
@@ -392,7 +392,7 @@ def train(forward_step_func, model, optimizer, lr_scheduler,
     recv_handle = torch.distributed.broadcast(INDEX_READY, args.max_training_rank, group=get_gloo_comm_group(), async_op=True)
     last_reload_iteration = iteration
     while iteration < args.train_iters:
-        if iteration >= last_reload_iteration + 500 and not recv_handle.is_completed():
+        if args.max_training_rank is not None and iteration >= last_reload_iteration + 500 and not recv_handle.is_completed():
             time.sleep(5)
             continue
...
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment