"git@developer.sourcefind.cn:OpenDAS/fairseq.git" did not exist on "e1f49695eee076a3f35e4ce9eebea9bc6c515780"
Unverified commit ed15b471 authored by Mufei Li, committed by GitHub

Update (#3916)

parent dc5035b1
@@ -64,7 +64,7 @@ generate a configuration file `cora_sage.yaml` which includes:
Different choices of task, model and dataset may give very different options,
so DGL-Go also adds a comment for what each option does in the file.
At this point you can also change options to explore optimization potential.
Below is the configuration file generated by the command above.
@@ -108,7 +108,7 @@ dgl recipe list
will list the available recipes:
```
➜ dgl recipe list
===============================================================================
| Filename | Pipeline | Dataset |
===============================================================================
@@ -163,7 +163,7 @@ That's all! Basically you only need two commands to train a graph neural network
That's not everything yet. You may want to open the hood and invoke deeper
customization. DGL-Go can export a **self-contained, reproducible** Python
script for you to do anything you like.
Try `dgl export --cfg cora_sage.yaml --output script.py`,
and you'll get the script used to train the model. Here's the code snippet:
@@ -319,7 +319,7 @@ def main():
**pipeline_cfg["optimizer"])
# train
test_acc = train(cfg, pipeline_cfg, device, data, model, optimizer, loss)
- torch.save(model, pipeline_cfg["save_path"])
+ torch.save(model.state_dict(), pipeline_cfg["save_path"])
return test_acc
...
......
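The recurring change in this commit swaps `torch.save(model, ...)` for `torch.save(model.state_dict(), ...)`. Saving the state dict stores only the parameter tensors rather than pickling the whole module, so the checkpoint no longer depends on the module's source layout, but loading requires rebuilding the model first. A minimal sketch of the save/load round trip (the `nn.Linear` stand-in and the `checkpoint.pth` path are illustrative, not taken from the diff):

```
import torch
import torch.nn as nn

# Toy module standing in for the GNN built by the exported script.
model = nn.Linear(4, 2)

# Save only the parameter tensors (the pattern this commit adopts).
torch.save(model.state_dict(), "checkpoint.pth")

# Loading requires re-creating the module with the same architecture,
# then restoring its parameters. torch.save(model, ...) would instead
# pickle the whole module and tie the checkpoint to the class definition.
restored = nn.Linear(4, 2)
restored.load_state_dict(torch.load("checkpoint.pth"))
restored.eval()
```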
@@ -27,7 +27,7 @@ class Model(nn.Module):
eid_dataloader = DataLoader(
range(
src.shape[-1]),
batch_size=self.eval_batch_size)
score_list = []
for eids in eid_dataloader:
score = self.edge_model(h[src[eids]], h[dst[eids]])
@@ -56,7 +56,7 @@ def train(cfg, pipeline_cfg, device, dataset, model, optimizer, loss_fcn):
neg_score = model.edge_model(h[neg_src], h[neg_dst])
loss = loss_fcn(torch.cat([pos_score, neg_score]), torch.cat(
[torch.ones_like(pos_score), torch.zeros_like(neg_score)]))
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
@@ -78,7 +78,7 @@ def train(cfg, pipeline_cfg, device, dataset, model, optimizer, loss_fcn):
print("Epoch {:05d} | Val Hits@50 {:.4f}".format(epoch, val_hits))
with torch.no_grad():
model.eval()
test_pos_edge, test_neg_edges = dataset.test_edges
pos_result = model.inference(train_g, node_feat, test_pos_edge)
neg_result = model.inference(train_g, node_feat, test_neg_edges)
@@ -112,7 +112,7 @@ def main():
loss = torch.nn.{{ loss }}()
optimizer = torch.optim.Adam(params, **pipeline_cfg["optimizer"])
test_hits = train(cfg, pipeline_cfg, device, dataset, model, optimizer, loss)
- torch.save(model, pipeline_cfg["save_path"])
+ torch.save(model.state_dict(), pipeline_cfg["save_path"])
return test_hits
if __name__ == '__main__':
......
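The validation metric printed above, Hits@50, is not defined anywhere in this diff. A common definition (used by the OGB link-prediction evaluator) is the fraction of positive edges scored higher than the K-th highest negative score; below is a minimal sketch under that assumption, with the helper name `hits_at_k` chosen here for illustration:

```
import torch

def hits_at_k(pos_score, neg_score, k=50):
    # Fraction of positive edges whose score beats the k-th highest
    # negative score (OGB-style Hits@K); returns 1.0 when there are
    # fewer than k negatives to compare against.
    if neg_score.numel() < k:
        return 1.0
    kth_neg = torch.topk(neg_score.flatten(), k).values[-1]
    return (pos_score.flatten() > kth_neg).float().mean().item()

# Toy usage with random scores.
print(hits_at_k(torch.randn(100) + 1.0, torch.randn(1000), k=50))
```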
@@ -76,9 +76,9 @@ def train(cfg, pipeline_cfg, device, data, model, optimizer, loss_fcn):
train_acc = accuracy(logits[train_mask], label[train_mask])
if epoch != 0 and epoch % pipeline_cfg['eval_period'] == 0:
val_acc = accuracy(logits[val_mask], label[val_mask])
{% if user_cfg.general_pipeline.early_stop %}
if stopper.step(val_acc, model):
break
{% endif %}
print("Epoch {:05d} | Loss {:.4f} | TrainAcc {:.4f} | ValAcc {:.4f}".
@@ -112,7 +112,7 @@ def main():
optimizer = torch.optim.{{ user_cfg.general_pipeline.optimizer.name }}(model.parameters(), **pipeline_cfg["optimizer"])
# train
test_acc = train(cfg, pipeline_cfg, device, data, model, optimizer, loss)
- torch.save(model, pipeline_cfg["save_path"])
+ torch.save(model.state_dict(), pipeline_cfg["save_path"])
return test_acc
if __name__ == '__main__':
......
@@ -102,7 +102,7 @@ def train(cfg, pipeline_cfg, device, data, model, optimizer, loss_fcn):
num_workers=pipeline_cfg["sampler"]["num_workers"])
{% if user_cfg.early_stop %}
stopper = EarlyStopping(pipeline_cfg['patience'], pipeline_cfg['checkpoint_path'])
{% endif %}
val_acc = 0.
for epoch in range(pipeline_cfg['num_epochs']):
@@ -119,16 +119,16 @@ def train(cfg, pipeline_cfg, device, data, model, optimizer, loss_fcn):
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_acc = accuracy(batch_pred, batch_labels)
print("Epoch {:05d} | Step {:05d} | Loss {:.4f} | TrainAcc {:.4f}".
format(epoch, step, loss.item(), train_acc))
if epoch % pipeline_cfg["eval_period"] == 0 and epoch != 0:
val_acc = evaluate(model, val_g, val_nfeat, val_labels, val_nid, cfg["eval_device"])
print('Eval Acc {:.4f}'.format(val_acc))
{% if user_cfg.early_stop %}
if stopper.step(val_acc, model):
break
{% endif %}
@@ -158,7 +158,7 @@ def main():
loss = torch.nn.{{ user_cfg.general_pipeline.loss }}()
optimizer = torch.optim.{{ user_cfg.general_pipeline.optimizer.name }}(model.parameters(), **pipeline_cfg["optimizer"])
test_acc = train(cfg, pipeline_cfg, device, data, model, optimizer, loss)
- torch.save(model, pipeline_cfg["save_path"])
+ torch.save(model.state_dict(), pipeline_cfg["save_path"])
return test_acc
if __name__ == '__main__':
......
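The node-classification templates above construct `EarlyStopping(pipeline_cfg['patience'], pipeline_cfg['checkpoint_path'])` and call `stopper.step(val_acc, model)`, treating a True return value as the signal to stop; the helper's implementation is not part of this diff. Below is a minimal sketch consistent with that usage (DGL-Go's actual helper may differ, e.g. in how it restores the best checkpoint):

```
import torch

class EarlyStopping:
    """Sketch of a patience-based early stopper matching the template's usage."""
    def __init__(self, patience, checkpoint_path):
        self.patience = patience
        self.checkpoint_path = checkpoint_path
        self.best_score = None
        self.counter = 0

    def step(self, score, model):
        # Save the best model so far and reset patience when the validation
        # score improves; otherwise count down and return True to stop.
        if self.best_score is None or score > self.best_score:
            self.best_score = score
            self.counter = 0
            torch.save(model.state_dict(), self.checkpoint_path)
        else:
            self.counter += 1
        return self.counter >= self.patience
```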