Unverified commit 94ab9709 authored by 张恒瑞, committed by GitHub

[Bugfix] fix a typo in consis_loss function (#2616)

parent ff345c2e
...@@ -61,7 +61,6 @@ Train a model which follows the original hyperparameters on different datasets.
# Cora:
python main.py --dataname cora --gpu 0 --lam 1.0 --tem 0.5 --order 8 --sample 4 --input_droprate 0.5 --hidden_droprate 0.5 --dropnode_rate 0.5 --hid_dim 32 --early_stopping 100 --lr 1e-2 --epochs 2000
# Citeseer:
python main.py --dataname citeseer --gpu 0 --lam 0.7 --tem 0.3 --order 2 --sample 2 --input_droprate 0.0 --hidden_droprate 0.2 --dropnode_rate 0.5 --hid_dim 32 --early_stopping 100 --lr 1e-2 --epochs 2000
# Pubmed:
python main.py --dataname pubmed --gpu 0 --lam 1.0 --tem 0.2 --order 5 --sample 4 --input_droprate 0.6 --hidden_droprate 0.8 --dropnode_rate 0.5 --hid_dim 32 --early_stopping 200 --lr 0.2 --epochs 2000 --use_bn
```
......
...@@ -58,7 +58,7 @@ def consis_loss(logps, temp, lam):
    sharp_p = (th.pow(avg_p, 1./temp) / th.sum(th.pow(avg_p, 1./temp), dim=1, keepdim=True)).detach()
    sharp_p = sharp_p.unsqueeze(2)
-   loss = th.mean(th.sum(th.pow(ps - sharp_p, 1./temp), dim = 1, keepdim=True))
+   loss = th.mean(th.sum(th.pow(ps - sharp_p, 2), dim = 1, keepdim=True))
    loss = lam * loss
    return loss
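
The change replaces the exponent `1./temp` with `2` in the distance term: `1./temp` belongs only to the temperature sharpening of the target, while the consistency penalty itself should be the squared distance between each stochastic prediction and the sharpened average. Below is a minimal, self-contained sketch of the corrected function; the lines that stack `logps` into `ps` sit above the hunk shown here, so they are reconstructed from context rather than taken from this diff.

```python
import torch as th

def consis_loss(logps, temp, lam):
    # logps: list of (N, C) log-probability tensors, one per random propagation.
    # Reconstructed from context: stack into an (N, C, S) probability tensor.
    ps = th.stack([th.exp(p) for p in logps], dim=2)
    avg_p = th.mean(ps, dim=2)                        # (N, C) mean prediction
    # Temperature sharpening of the target; detached so no gradient flows into it.
    sharp_p = (th.pow(avg_p, 1. / temp)
               / th.sum(th.pow(avg_p, 1. / temp), dim=1, keepdim=True)).detach()
    sharp_p = sharp_p.unsqueeze(2)                    # (N, C, 1), broadcasts over S
    # The fix: exponent 2 (squared distance), not 1./temp.
    loss = th.mean(th.sum(th.pow(ps - sharp_p, 2), dim=1, keepdim=True))
    return lam * loss

# Quick check with dummy data: two stochastic passes over 4 nodes, 3 classes.
logps = [th.log_softmax(th.randn(4, 3), dim=1) for _ in range(2)]
print(consis_loss(logps, temp=0.5, lam=1.0))
```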
...@@ -102,7 +102,6 @@ if __name__ == '__main__':
    test_idx = th.nonzero(test_mask, as_tuple=False).squeeze().to(device)
    # Step 2: Create model =================================================================== #
    model = GRAND(n_features, args.hid_dim, n_classes, args.sample, args.order,
                  args.dropnode_rate, args.input_droprate,
                  args.hidden_droprate, args.use_bn)
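
For reference, this constructor call maps one-to-one onto the README flags. A hypothetical instantiation with the Cora hyperparameters is sketched below; the argument order is inferred from the call site above, Cora's 1433 input features and 7 classes are dataset constants, and the `model` import path is an assumption about the example's layout.

```python
from model import GRAND  # assumption: the example defines GRAND in model.py

# Positional arguments follow the call site above; values are the Cora flags
# from the README (hid_dim 32, sample 4, order 8, all droprates 0.5).
model = GRAND(1433, 32, 7,      # n_features, hid_dim, n_classes
              4, 8,             # sample, order
              0.5, 0.5,         # dropnode_rate, input_droprate
              0.5, False)       # hidden_droprate, use_bn (no --use_bn for Cora)
```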
......