Commit 7b5e024d authored by zhangwq5

all

parent 33c0366d
# Unique model identifier
modelCode=1716
# Model name
modelName=powerflownet_pytorch
# Model description
modelDescription=PowerFlowNet is a high-accuracy, ultra-fast, and highly scalable graph neural network model tailored for power-system power flow analysis.
# Application scenarios
appScenario=manufacturing,broadcast media,electric power,energy,healthcare
# Framework type
frameType=pytorch
"""This package provides Graph Neural Network classes for solving the problem"""
__all__ = ['MPN', 'MPN_simplenet', 'SkipMPN', 'MaskEmbdMPN', 'MultiConvNet']
\ No newline at end of file
"""
This script is used to evaluate the performance of various models on the power flow problem.
Models:
- MPN
- Tikhonov Regularizer
- MLP
- Newton-Raphson method
"""
import time

import numpy as np
import torch
from pygsp import graphs

from collaborative_filtering import tikhonov_regularizer, collaborative_filtering_testing
from datasets.PowerFlowData import PowerFlowData
from networks.MPN import MPN, MPN_simplenet
from utils.argument_parser import argument_parser
from utils.custom_loss_functions import Masked_L2_loss
cases = ['case14','case118','case6470rte']
# cases = ['case6470rte']
for case in cases:
    case_name = case.split("case")[1]
    print(f'\n\nCase {case_name} is being evaluated...')

    # Load testing data
    testset = PowerFlowData(root="./data/", case=case_name, split=[.5, .2, .3], task='test')
    sample_number = 1000
    if sample_number > len(testset):
        sample_number = len(testset)
    print(f'Number of samples: {sample_number}')

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    eval_loss_fn = Masked_L2_loss(regularize=False)

    # Load MPN model
    model_path = "./models/testing/mpn_" + case_name + ".pt"
    MPN_model = MPN(
        nfeature_dim=6,
        efeature_dim=5,
        output_dim=6,
        hidden_dim=129,
        n_gnn_layers=4,
        K=3,
        dropout_rate=0.2
    ).to(device)
    _to_load = torch.load(model_path)
    MPN_model.load_state_dict(_to_load['model_state_dict'])
    MPN_model.eval()

    # Get loss of MPN model and execution time
    timer_MPN = 0
    loss_MPN = 0
    for i, sample in enumerate(testset[:sample_number]):
        time_start_gnn = time.time()
        result = MPN_model(sample.to(device))
        time_end_gnn = time.time()
        loss_MPN += eval_loss_fn(result, sample.y.to(device), sample.x[:, 10:].to(device)).item()
        timer_MPN += time_end_gnn - time_start_gnn
    print(f'Loss of MPN model: {loss_MPN/sample_number}')
    print(f'Execution time of MPN model: {timer_MPN/sample_number}')

    ###### Tikhonov Regularizer ##########################
    # Load adjacency matrix from file
    file_path = "./data/raw/case" + str(case_name) + '_adjacency_matrix.npy'
    adjacency_matrix = np.load(file_path)
    # print(adjacency_matrix.shape)
    num_of_nodes = adjacency_matrix.shape[0]
    # print(f'Number of nodes: {num_of_nodes}')

    # Create graph from adjacency matrix
    G = graphs.Graph(adjacency_matrix)
    # Get incidence matrix
    G.compute_differential_operator()
    B = G.D.toarray()
    # print(f'B: {B.shape}')
    # Get Laplacian matrix
    L = G.L.toarray()
    # print(f'Laplacian: {L.shape}')

    timer_regularizer = 0
    loss_regularizer = 0
    for i, sample in enumerate(testset[:sample_number]):
        time_start = time.time()
        result = tikhonov_regularizer(1.25, L, sample.x[:, 4:8], sample.x[:, 10:].to(device))
        # result = collaborative_filtering_testing(sample.x[:, 4:8], sample.x[:, 10:14], B, sample.y[:, :4], 4)
        time_end = time.time()
        loss_regularizer += eval_loss_fn(result, sample.y[:, :4], sample.x[:, 10:14].to(device)).item()
        timer_regularizer += time_end - time_start
    print(f'Loss of Tikhonov Regularizer: {loss_regularizer/sample_number}')
    print(f'Execution time of Tikhonov Regularizer: {timer_regularizer/sample_number}')

    ###### MLP ##########################
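    # The MLP baseline named in the module docstring has no body in this script.
    # Below is a minimal, hedged sketch of how such a baseline could be wired up;
    # the layer sizes are assumptions, the network is untrained here, and only
    # the evaluation plumbing mirrors the MPN loop above (per-node input
    # features are columns 4:10, the mask is columns 10:16).
    mlp = torch.nn.Sequential(
        torch.nn.Linear(6, 64),
        torch.nn.ReLU(),
        torch.nn.Linear(64, 6),
    ).to(device)
    mlp.eval()
    timer_mlp = 0
    loss_mlp = 0
    with torch.no_grad():
        for i, sample in enumerate(testset[:sample_number]):
            time_start_mlp = time.time()
            result = mlp(sample.x[:, 4:10].to(device))
            time_end_mlp = time.time()
            loss_mlp += eval_loss_fn(result, sample.y.to(device), sample.x[:, 10:].to(device)).item()
            timer_mlp += time_end_mlp - time_start_mlp
    print(f'Loss of (untrained) MLP baseline sketch: {loss_mlp/sample_number}')
    print(f'Execution time of MLP baseline sketch: {timer_mlp/sample_number}')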
wandb
torch_geometric
root@nvnode2:/home/zwq/project/PF/PoweFlowNet# python3 train.py --cfg_json ./configs/standard.json\
--num-epochs 10\
--data-dir ./data/\
--batch-size 128\
--train_loss_fn mse_loss\
--lr 0.001\
--case 118v2\
--model MaskEmbdMultiMPN\
--save
/usr/local/lib/python3.10/dist-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: '/usr/local/lib/python3.10/dist-packages/torchvision/image.so: undefined symbol: _ZN3c1017RegisterOperatorsD1Ev'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?
warn(
/home/zwq/project/PF/PoweFlowNet/datasets/PowerFlowData.py:125: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
*torch.load(self.processed_paths[0])) # necessary, do not forget!
torch.Size([700000, 9])
Data(x=[700000, 9], edge_index=[2, 1000000], edge_attr=[1000000, 5], y=[700000, 8])
torch.Size([700000, 9])
Data(x=[700000, 9], edge_index=[2, 1000000], edge_attr=[1000000, 5], y=[700000, 8])
torch.Size([700000, 9])
Data(x=[700000, 9], edge_index=[2, 1000000], edge_attr=[1000000, 5], y=[700000, 8])
Total number of parameters: 222273
Training: 100%|████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:21<00:00, 18.30it/s]
Evaluating:: 100%|█████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:13<00:00, 28.45it/s]
Epoch 1 / 10: train_loss=2.0736, val_loss=0.7792, best_val_loss=0.7792
Training: 100%|████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:08<00:00, 44.09it/s]
Evaluating:: 100%|█████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:05<00:00, 67.46it/s]
Epoch 2 / 10: train_loss=0.5806, val_loss=0.2028, best_val_loss=0.2028
Training: 100%|████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:08<00:00, 44.14it/s]
Evaluating:: 100%|█████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:05<00:00, 67.69it/s]
Epoch 3 / 10: train_loss=0.2477, val_loss=0.1484, best_val_loss=0.1484
Training: 100%|████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:08<00:00, 44.58it/s]
Evaluating:: 100%|█████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:05<00:00, 68.32it/s]
Epoch 4 / 10: train_loss=0.1819, val_loss=0.1217, best_val_loss=0.1217
Training: 100%|████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:09<00:00, 42.85it/s]
Evaluating:: 100%|█████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:05<00:00, 68.28it/s]
Epoch 5 / 10: train_loss=0.1486, val_loss=0.1052, best_val_loss=0.1052
Training: 100%|████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:08<00:00, 44.45it/s]
Evaluating:: 100%|█████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:05<00:00, 68.22it/s]
Epoch 6 / 10: train_loss=0.1288, val_loss=0.0943, best_val_loss=0.0943
Training: 100%|████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:09<00:00, 43.17it/s]
Evaluating:: 100%|█████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:05<00:00, 68.33it/s]
Epoch 7 / 10: train_loss=0.1141, val_loss=0.0860, best_val_loss=0.0860
Training: 100%|████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:08<00:00, 44.54it/s]
Evaluating:: 100%|█████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:05<00:00, 68.16it/s]
Epoch 8 / 10: train_loss=0.1042, val_loss=0.0789, best_val_loss=0.0789
Training: 100%|████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:08<00:00, 44.63it/s]
Evaluating:: 100%|█████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:05<00:00, 68.03it/s]
Epoch 9 / 10: train_loss=0.0965, val_loss=0.0736, best_val_loss=0.0736
Training: 100%|████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:08<00:00, 44.35it/s]
Evaluating:: 100%|█████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:05<00:00, 67.79it/s]
Epoch 10 / 10: train_loss=0.0901, val_loss=0.0693, best_val_loss=0.0693
Training Complete. Best validation loss: 0.0693
/home/zwq/project/PF/PoweFlowNet/train.py:186: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
_to_load = torch.load(SAVE_MODEL_PATH)
Evaluating:: 100%|█████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:13<00:00, 28.40it/s]
Test loss: 0.0693
root@dcunode7:/home/zwq/project/PF/PoweFlowNet# python3 train.py --cfg_json ./configs/standard.json\
--num-epochs 10\
--data-dir ./data/\
--batch-size 128\
--train_loss_fn mse_loss\
--lr 0.001\
--case 118v2\
--model MaskEmbdMultiMPN\
--save
/home/zwq/project/PF/PoweFlowNet/datasets/PowerFlowData.py:125: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
*torch.load(self.processed_paths[0])) # necessary, do not forget!
torch.Size([700000, 9])
Data(x=[700000, 9], edge_index=[2, 1000000], edge_attr=[1000000, 5], y=[700000, 8])
torch.Size([700000, 9])
Data(x=[700000, 9], edge_index=[2, 1000000], edge_attr=[1000000, 5], y=[700000, 8])
torch.Size([700000, 9])
Data(x=[700000, 9], edge_index=[2, 1000000], edge_attr=[1000000, 5], y=[700000, 8])
Total number of parameters: 222273
Training: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:25<00:00, 15.54it/s]
Evaluating:: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:16<00:00, 23.09it/s]
Epoch 1 / 10: train_loss=2.0737, val_loss=0.7795, best_val_loss=0.7795
Training: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:12<00:00, 31.85it/s]
Evaluating:: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:07<00:00, 52.58it/s]
Epoch 2 / 10: train_loss=0.5807, val_loss=0.2026, best_val_loss=0.2026
Training: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:12<00:00, 30.50it/s]
Evaluating:: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:07<00:00, 50.65it/s]
Epoch 3 / 10: train_loss=0.2475, val_loss=0.1485, best_val_loss=0.1485
Training: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:12<00:00, 31.01it/s]
Evaluating:: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:08<00:00, 46.77it/s]
Epoch 4 / 10: train_loss=0.1821, val_loss=0.1219, best_val_loss=0.1219
Training: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:12<00:00, 31.77it/s]
Evaluating:: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:08<00:00, 45.30it/s]
Epoch 5 / 10: train_loss=0.1489, val_loss=0.1057, best_val_loss=0.1057
Training: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:10<00:00, 36.25it/s]
Evaluating:: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:08<00:00, 44.02it/s]
Epoch 6 / 10: train_loss=0.1293, val_loss=0.0948, best_val_loss=0.0948
Training: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:11<00:00, 34.66it/s]
Evaluating:: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:07<00:00, 49.79it/s]
Epoch 7 / 10: train_loss=0.1146, val_loss=0.0863, best_val_loss=0.0863
Training: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:12<00:00, 30.27it/s]
Evaluating:: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:07<00:00, 51.53it/s]
Epoch 8 / 10: train_loss=0.1045, val_loss=0.0791, best_val_loss=0.0791
Training: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:12<00:00, 31.05it/s]
Evaluating:: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:07<00:00, 49.16it/s]
Epoch 9 / 10: train_loss=0.0967, val_loss=0.0738, best_val_loss=0.0738
Training: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:11<00:00, 35.21it/s]
Evaluating:: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:08<00:00, 43.57it/s]
Epoch 10 / 10: train_loss=0.0902, val_loss=0.0693, best_val_loss=0.0693
Training Complete. Best validation loss: 0.0693
/home/zwq/project/PF/PoweFlowNet/train.py:186: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
_to_load = torch.load(SAVE_MODEL_PATH)
Evaluating:: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 391/391 [00:15<00:00, 24.56it/s]
Test loss: 0.0693
"""This program processes (and saves) results of the training. """
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
LOG_DIR = 'logs'
def main():
run_id = '20230628-6515'
TRAIN_LOG_PATH = os.path.join(LOG_DIR, 'train_log/train_log_'+run_id+'.pt')
try:
train_log = torch.load(TRAIN_LOG_PATH, map_location=torch.device('cpu'))
except FileNotFoundError:
print("File not found. terminating program.")
return 1
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(train_log['train']['loss'], label='train')
ax.plot(train_log['val']['loss'], label='val')
if 'test' in train_log.keys():
ax.plot(train_log['test']['loss'], label='test')
ax.set_yscale('log')
ax.set_xlabel('Epoch')
ax.set_ylabel('Loss (log scale)')
ax.set_title('Loss vs Epoch')
ax.legend()
plt.savefig('results/'+run_id+'_loss_vs_epoch.png')
plt.show()
plt.close()
if __name__ == "__main__":
main()
\ No newline at end of file
# for reference, this is one datapoint:
# Data(x=[14, 16], edge_index=[2, 20], edge_attr=[20, 5], y=[14, 6])
# this one gets 0.2595 accuracy after 20 epochs
# WITH THIS ONE USE MODEL MPN_simplenet
python train.py --num-epochs 20\
--batch-size 128\
--lr 0.001\
--case 118\
--nfeature_dim 16\
--efeature_dim 5\
--hidden_dim 64\
--n_gnn_layers 2\
--K 3\
--dropout_rate 0.2\
--model MPN_simplenet\
--regularize=True\
--regularization_coeff=0.2
# for reference, this is one datapoint:
# Data(x=[14, 16], edge_index=[2, 20], edge_attr=[20, 5], y=[14, 6])
# this one gets ??? accuracy after 200 epochs
# WITH THIS ONE USE MODEL MPN
python3 train.py --cfg_json ./configs/large.json\
--num-epochs 2000\
--data-dir ~/data/volume_2/power_flow_dataset\
--batch-size 128\
--train_loss_fn mse_loss\
--lr 0.001\
--case 118v2\
--model MaskEmbdMultiMPN\
--save
from datetime import datetime
import os
import pickle
import random
import time

import numpy as np
import pandapower as pp
import torch
from torch_geometric.loader import DataLoader
from tqdm import tqdm

from datasets.PowerFlowData import PowerFlowData
from networks.MPN import MPN, MPN_simplenet
from utils.argument_parser import argument_parser
from utils.evaluation import evaluate_epoch
from utils.custom_loss_functions import Masked_L2_loss


def load_cases(path):
    # load a pickle file containing the cases
    with open(path, 'rb') as f:
        cases = pickle.load(f)
    return cases


def load_net(sample, net, case_data, solution=None):
    # load the data into the pandapower network
    net.line['r_ohm_per_km'] = case_data[0]
    net.line['x_ohm_per_km'] = case_data[1]
    net.load['p_mw'] = case_data[4]
    net.load['q_mvar'] = case_data[5]
    net.gen['vm_pu'] = case_data[2]
    net.gen['p_mw'] = case_data[3]
    # seed the solver with a solution (pandapower reads res_bus when init="results")
    if solution is not None:
        # print(solution)
        net.res_bus['vm_pu'] = solution[:, 0]
        net.res_bus['va_degree'] = solution[:, 1]
        net.res_bus['p_mw'] = solution[:, 2]
        net.res_bus['q_mvar'] = solution[:, 3]
    return net
# Step 0: Parse Arguments and Setup
args = argument_parser()
models = {
    'MPN': MPN,
    'MPN_simplenet': MPN_simplenet,
}

# Training parameters
data_dir = args.data_dir
loss_fn = Masked_L2_loss(regularize=args.regularize,
                         regcoeff=args.regularization_coeff)
eval_loss_fn = Masked_L2_loss(regularize=False)

scenarios_list = ['case14', 'case118', 'case6470rte']
for scenario_index, scenario in enumerate(scenarios_list):
    case_name = scenario.split("case")[1]
    print(f'\n\nCase {case_name} is being evaluated...')

    # Load testing data
    testset = PowerFlowData(root="./data/", case=case_name, split=[.5, .2, .3], task='test')
    sample_number = 10
    if sample_number > len(testset):
        sample_number = len(testset)
    print(f'Number of samples: {sample_number}')

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    eval_loss_fn = Masked_L2_loss(regularize=False)

    # Load MPN model
    model_path = "./models/testing/mpn_" + case_name + ".pt"
    MPN_model = MPN(
        nfeature_dim=6,
        efeature_dim=5,
        output_dim=6,
        hidden_dim=129,
        n_gnn_layers=4,
        K=3,
        dropout_rate=0.2
    ).to(device)
    _to_load = torch.load(model_path)
    MPN_model.load_state_dict(_to_load['model_state_dict'])
    MPN_model.eval()

    results = []
    time_start_gnn = time.time()
    for i, sample in enumerate(testset[:sample_number]):
        results.append(MPN_model(sample.to(device)))
    time_end_gnn = time.time()

    # De-normalize the GNN predictions back to physical units
    test_set_mean = testset.xymean[0]
    test_set_std = testset.xystd[0]
    for i in range(len(results)):
        results[i] = results[i] * test_set_std + test_set_mean
        results[i] = results[i].detach().cpu().numpy()

    cases = load_cases("./data/raw/case" + case_name + "_reconstruction_case.pkl")
    scenarios = [pp.networks.case14, pp.networks.case118, pp.networks.case6470rte]
    # algorithms = ["nr", "iwamoto_nr", "gs", "fdbx", "fdxb"]
    # algorithms = ["nr", "iwamoto_nr"]
    algorithms = ["nr"]

    results_nr = []
    times_auto_init = []
    loss_auto_init = 0

    # Run the power flow with auto_init
    for a in algorithms:
        print(f'Auto: Running {a}...')
        timer = 0
        for i, sample in enumerate(testset[:sample_number]):
            net = scenarios[scenario_index]()
            net = load_net(sample, net, cases[i])
            t0 = time.time()
            pp.runpp(net, algorithm=a, init="auto", numba=False)
            t1 = time.time()
            # gt = sample.y * test_set_std + test_set_mean
            # loss_auto_init += eval_loss_fn(torch.tensor(net.res_bus.values), gt[:, :4], sample.x[:, 10:14])
            result_pf = net.res_bus.values
            result_pf = (torch.tensor(result_pf) - test_set_mean[:4]) / test_set_std[:4]
            results_nr.append(result_pf)
            # loss_auto_init += eval_loss_fn(result_pf, sample.y[:, :4], sample.x[:, 10:14]).item()
            loss_auto_init += 0  # loss computation disabled; the NR solution serves as the reference below
            timer += t1 - t0
        times_auto_init.append(timer)

    # Run the power flow with the GNN results as initial values
    times_result_init = []
    loss_result_init = 0
    for a in algorithms:
        print(f'Results: Running {a}...')
        timer = 0
        for i, sample in enumerate(testset[:sample_number]):
            net = scenarios[scenario_index]()
            net = load_net(sample, net, cases[i], results[i])
            t0 = time.time()
            pp.runpp(net, algorithm=a, init="results", numba=False)
            t1 = time.time()
            result_pf = net.res_bus.values
            result_pf = (torch.tensor(result_pf) - test_set_mean[:4]) / test_set_std[:4]
            loss_result_init += eval_loss_fn(result_pf, results_nr[i], sample.x[:, 10:14]).item()
            timer += t1 - t0
        times_result_init.append(timer)

    # Run the DC power flow
    results_dc = []
    times_dc = []
    loss_dc = 0
    for a in algorithms:
        print(f'DC: Running {a}...')
        timer = 0
        for i, sample in enumerate(testset[:sample_number]):
            net = scenarios[scenario_index]()
            net = load_net(sample, net, cases[i], results[i])
            t0 = time.time()
            pp.rundcpp(net, numba=False)  # the DC power flow takes no algorithm choice
            t1 = time.time()
            results_dc.append(net.res_bus[["vm_pu", "va_degree", "p_mw", "q_mvar"]].values)
            results_dc[i] = (torch.tensor(results_dc[i]) - test_set_mean[:4]) / test_set_std[:4]
            # loss_dc += eval_loss_fn(results_dc[i], sample.y[:, :4], sample.x[:, 10:14]).item()
            loss_dc += eval_loss_fn(results_dc[i], results_nr[i], sample.x[:, 10:14]).item()
            # loss_dc += eval_loss_fn(torch.tensor(results_dc[i]), gt[:, :4], sample.x[:, 10:14]).item()
            timer += t1 - t0
            # print(results_dc[i], sample.y[:, :4])
        times_dc.append(timer)

    print("\n\n===========================================")
    print("Results with auto_init:\n")
    for a in algorithms:
        print(f"{a}: {times_auto_init[algorithms.index(a)]/sample_number}")
    print(f'Loss auto_init: {loss_auto_init/sample_number}')
    print("-------------------------------------------")
    print("GNNs: ", (time_end_gnn - time_start_gnn)/sample_number)
    print("-------------------------------------------")
    print("Results with results init: \n")
    for a in algorithms:
        print(f"{a}: {times_result_init[algorithms.index(a)]/sample_number}")
    print(f'Loss result_init: {loss_result_init/sample_number}')
    print("-------------------------------------------")
    print("Results DC: \n")
    for a in algorithms:
        print(f"{a}: {times_dc[algorithms.index(a)]/sample_number}")
    print(f'Loss DC: {loss_dc/sample_number}')
    print("\n\n===========================================")
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import pandapower.networks as pn\n",
"import pypower.api as pp\n",
"import networkx as nx\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"graphs = [pp.case300(), pp.case118(), pp.case57(), pp.case39()]"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"def analyse_graph(pp_graph, plot=True):\n",
" node_ids = pp_graph['bus'][:,0].astype(np.int64)\n",
" edges = pp_graph['branch'][:,0:2].astype(np.int64)\n",
"\n",
" G = nx.Graph()\n",
" G.add_nodes_from(node_ids)\n",
" for edge in edges:\n",
" G.add_edge(edge[0], edge[1])\n",
"\n",
" degree_sequence = sorted((d for n, d in G.degree()), reverse=True)\n",
" dmax = max(degree_sequence)\n",
"\n",
" if plot:\n",
" fig = plt.figure(\"Degree of a random graph\", figsize=(8, 8))\n",
" # Create a gridspec for adding subplots of different sizes\n",
" axgrid = fig.add_gridspec(5, 4)\n",
"\n",
" ax0 = fig.add_subplot(axgrid[0:3, :])\n",
" Gcc = G.subgraph(sorted(nx.connected_components(G), key=len, reverse=True)[0])\n",
" pos = nx.spring_layout(Gcc, seed=10396953)\n",
" nx.draw_networkx_nodes(Gcc, pos, ax=ax0, node_size=20)\n",
" nx.draw_networkx_edges(Gcc, pos, ax=ax0, alpha=0.4)\n",
" ax0.set_title(\"Connected components of G\")\n",
" ax0.set_axis_off()\n",
"\n",
" ax1 = fig.add_subplot(axgrid[3:, :2])\n",
" ax1.plot(degree_sequence, \"b-\", marker=\"o\")\n",
" ax1.set_title(\"Degree Rank Plot\")\n",
" ax1.set_ylabel(\"Degree\")\n",
" ax1.set_xlabel(\"Rank\")\n",
"\n",
" ax2 = fig.add_subplot(axgrid[3:, 2:])\n",
" ax2.bar(*np.unique(degree_sequence, return_counts=True))\n",
" ax2.set_title(\"Degree histogram\")\n",
" ax2.set_xlabel(\"Degree\")\n",
" ax2.set_ylabel(\"# of Nodes\")\n",
"\n",
" fig.tight_layout()\n",
" plt.show()\n",
" return np.unique(degree_sequence, return_counts=True)\n",
"\n",
"# (array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 11]), array([69, 76, 84, 42, 14, 6, 5, 2, 1, 1], dtype=int64)) \n",
"\n",
"# (array([1, 2, 3, 4, 5, 6, 7, 8, 9]), array([ 7, 56, 19, 15, 11, 6, 2, 1, 1], dtype=int64)) \n",
"\n",
"# (array([1, 2, 3, 4, 5, 6]), array([ 1, 32, 12, 7, 3, 2], dtype=int64)) \n",
"\n",
"# (array([1, 2, 3, 4, 5]), array([ 9, 12, 14, 3, 1], dtype=int64)) "
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0.16731518 0.34241245 0.25097276 0.13035019 0.05642023 0.02723735\n",
" 0.01361868 0.00583658 0.00389105 0. 0.00194553]\n"
]
}
],
"source": [
"def analyse_graphs(graphs):\n",
" degree_frequencies = [analyse_graph(graph, plot=False) for graph in graphs]\n",
" maxdegree = max([np.max(x[0]) for x in degree_frequencies])\n",
" aggregated_degree_frequencies = {d: 0 for d in list(range(1, maxdegree+1))}\n",
" for ds, cs in degree_frequencies:\n",
" for d, c in zip(ds, cs):\n",
" aggregated_degree_frequencies[d] += c\n",
" frequencies = [x for x in aggregated_degree_frequencies.values()]\n",
" # now we need to determine random degree sequences\n",
" print(np.array(frequencies) / sum(frequencies))\n",
"\n",
" \n",
"\n",
"\n",
"analyse_graphs(graphs)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "graphml_proj",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
import os

import torch
import torch_geometric
from torch_geometric.loader import DataLoader

from datasets.PowerFlowData import PowerFlowData
from networks.MPN import MPN, MPN_simplenet, SkipMPN, MaskEmbdMPN, MultiConvNet, MultiMPN, MaskEmbdMultiMPN
from utils.argument_parser import argument_parser
from utils.custom_loss_functions import Masked_L2_loss, PowerImbalance, MixedMSEPoweImbalance
from utils.evaluation import load_model, evaluate_epoch

LOG_DIR = 'logs'
SAVE_DIR = 'models'


@torch.no_grad()
def main():
    run_id = '20230628-6312'
    models = {
        'MPN': MPN,
        'MPN_simplenet': MPN_simplenet,
        'SkipMPN': SkipMPN,
        'MaskEmbdMPN': MaskEmbdMPN,
        'MultiConvNet': MultiConvNet,
        'MultiMPN': MultiMPN,
        'MaskEmbdMultiMPN': MaskEmbdMultiMPN
    }
    args = argument_parser()
    batch_size = args.batch_size
    grid_case = args.case
    data_dir = args.data_dir
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    testset = PowerFlowData(root=data_dir, case=grid_case,
                            split=[.5, .2, .3], task='test')
    test_loader = DataLoader(testset, batch_size=batch_size, shuffle=False)

    pwr_imb_loss = PowerImbalance(*testset.get_data_means_stds()).to(device)
    mse_loss = torch.nn.MSELoss(reduction='mean').to(device)
    masked_l2 = Masked_L2_loss(regularize=False).to(device)
    all_losses = {
        'PowerImbalance': pwr_imb_loss,
        'Masked_L2_loss': masked_l2,
        'MSE': mse_loss,
    }

    # Network Parameters
    nfeature_dim = args.nfeature_dim
    efeature_dim = args.efeature_dim
    hidden_dim = args.hidden_dim
    output_dim = args.output_dim
    n_gnn_layers = args.n_gnn_layers
    conv_K = args.K
    dropout_rate = args.dropout_rate
    model = models[args.model]

    node_in_dim, node_out_dim, edge_dim = testset.get_data_dimensions()
    model = model(
        nfeature_dim=nfeature_dim,
        efeature_dim=efeature_dim,
        output_dim=output_dim,
        hidden_dim=hidden_dim,
        n_gnn_layers=n_gnn_layers,
        K=conv_K,
        dropout_rate=dropout_rate,
    ).to(device)  # 40k params
    model.eval()
    model, _ = load_model(model, run_id, device)
    print(f"Model: {args.model}")
    print(f"Case: {grid_case}")

    for name, loss_fn in all_losses.items():
        test_loss = evaluate_epoch(model, test_loader, loss_fn, device)
        print(f"{name}:\t{test_loss:.4f}")

    # sample = testset[10].to(device)
    # out = model(sample)
    # out = out*sample.x[:, 10:]
    # input_x = sample.x[:, 4:10]*sample.x[:, 10:]
    # print(f"Input: {sample*testset.xystd + testset.xymean}")
    # print(f"Output: {out*testset.xystd + testset.xymean}")
    # for i in range(sample.x.shape[0]):
    #     print("=====================================")
    #     print(f"Actual: {input_x[i, :]}")
    #     print(f"Predicted: {out[i, :]}")
    #     print(f"Difference: {input_x[i, :] - out[i, :]}")


if __name__ == "__main__":
    main()
from datetime import datetime
import os
import random

import numpy as np
import torch
from torch_geometric.loader import DataLoader
from tqdm import tqdm
import wandb

from datasets.PowerFlowData import PowerFlowData
from networks.MPN import MPN, MPN_simplenet, SkipMPN, MaskEmbdMPN, MultiConvNet, MultiMPN, MaskEmbdMultiMPN
from utils.argument_parser import argument_parser
from utils.training import train_epoch, append_to_json
from utils.evaluation import evaluate_epoch
from utils.custom_loss_functions import Masked_L2_loss, PowerImbalance, MixedMSEPoweImbalance


def main():
    # Step 0: Parse Arguments and Setup
    args = argument_parser()
    run_id = datetime.now().strftime("%Y%m%d") + '-' + str(random.randint(0, 9999))
    LOG_DIR = 'logs'
    SAVE_DIR = 'models'
    TRAIN_LOG_PATH = os.path.join(LOG_DIR, 'train_log/train_log_'+run_id+'.pt')
    SAVE_LOG_PATH = os.path.join(LOG_DIR, 'save_logs.json')
    SAVE_MODEL_PATH = os.path.join(SAVE_DIR, 'model_'+run_id+'.pt')
    models = {
        'MPN': MPN,
        'MPN_simplenet': MPN_simplenet,
        'SkipMPN': SkipMPN,
        'MaskEmbdMPN': MaskEmbdMPN,
        'MultiConvNet': MultiConvNet,
        'MultiMPN': MultiMPN,
        'MaskEmbdMultiMPN': MaskEmbdMultiMPN
    }
    mixed_cases = ['118v2', '14v2']

    # Training parameters
    data_dir = args.data_dir
    normalize_data = not args.disable_normalize
    num_epochs = args.num_epochs
    loss_fn = Masked_L2_loss(regularize=args.regularize, regcoeff=args.regularization_coeff)
    eval_loss_fn = Masked_L2_loss(regularize=False)
    lr = args.lr
    batch_size = args.batch_size
    grid_case = args.case

    # Network parameters
    nfeature_dim = args.nfeature_dim
    efeature_dim = args.efeature_dim
    hidden_dim = args.hidden_dim
    output_dim = args.output_dim
    n_gnn_layers = args.n_gnn_layers
    conv_K = args.K
    dropout_rate = args.dropout_rate
    model = models[args.model]

    log_to_wandb = args.wandb
    wandb_entity = args.wandb_entity
    if log_to_wandb:
        wandb.init(project="PowerFlowNet",
                   entity=wandb_entity,
                   name=run_id,
                   config=args)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.manual_seed(1234)
    np.random.seed(1234)
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False

    # Step 1: Load data
    trainset = PowerFlowData(root=data_dir, case=grid_case, split=[.5, .2, .3], task='train', normalize=normalize_data)
    valset = PowerFlowData(root=data_dir, case=grid_case, split=[.5, .2, .3], task='val', normalize=normalize_data)
    testset = PowerFlowData(root=data_dir, case=grid_case, split=[.5, .2, .3], task='test', normalize=normalize_data)
    train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(valset, batch_size=batch_size, shuffle=False)
    test_loader = DataLoader(testset, batch_size=batch_size, shuffle=False)

    # [Optional] physics-informed loss function
    if args.train_loss_fn == 'power_imbalance':
        # overwrite the loss function
        loss_fn = PowerImbalance(*trainset.get_data_means_stds()).to(device)
    elif args.train_loss_fn == 'masked_l2':
        loss_fn = Masked_L2_loss(regularize=args.regularize, regcoeff=args.regularization_coeff)
    elif args.train_loss_fn == 'mixed_mse_power_imbalance':
        loss_fn = MixedMSEPoweImbalance(*trainset.get_data_means_stds(), alpha=0.9).to(device)
    else:
        loss_fn = torch.nn.MSELoss()

    # Step 2: Create model and optimizer (and scheduler)
    node_in_dim, node_out_dim, edge_dim = trainset.get_data_dimensions()
    assert node_in_dim == 16
    model = model(
        nfeature_dim=nfeature_dim,
        efeature_dim=efeature_dim,
        output_dim=output_dim,
        hidden_dim=hidden_dim,
        n_gnn_layers=n_gnn_layers,
        K=conv_K,
        dropout_rate=dropout_rate
    ).to(device)

    # Calculate model size
    pytorch_total_params = sum(p.numel() for p in model.parameters())
    print("Total number of parameters: ", pytorch_total_params)

    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
    # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5, verbose=True)
    # NOTE: OneCycleLR is configured for one step per batch (steps_per_epoch=len(train_loader)),
    # but scheduler.step() below is only called once per epoch, so the schedule
    # advances far more slowly than configured.
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=lr, steps_per_epoch=len(train_loader), epochs=num_epochs)

    # Step 3: Train model
    best_train_loss = 10000.
    best_val_loss = 10000.
    train_log = {
        'train': {
            'loss': []},
        'val': {
            'loss': []},
    }
    # pbar = tqdm(range(num_epochs), total=num_epochs, position=0, leave=True)
    for epoch in range(num_epochs):
        train_loss = train_epoch(model, train_loader, loss_fn, optimizer, device)
        val_loss = evaluate_epoch(model, val_loader, eval_loss_fn, device)
        scheduler.step()
        train_log['train']['loss'].append(train_loss)
        train_log['val']['loss'].append(val_loss)

        if log_to_wandb:
            wandb.log({'train_loss': train_loss,
                       'val_loss': val_loss})

        if train_loss < best_train_loss:
            best_train_loss = train_loss

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            if args.save:
                _to_save = {
                    'epoch': epoch,
                    'args': args,
                    'val_loss': best_val_loss,
                    'model_state_dict': model.state_dict(),
                }
                os.makedirs('models', exist_ok=True)
                torch.save(_to_save, SAVE_MODEL_PATH)
                append_to_json(
                    SAVE_LOG_PATH,
                    run_id,
                    {
                        'val_loss': f"{best_val_loss: .4f}",
                        # 'test_loss': f"{test_loss: .4f}",
                        'train_log': TRAIN_LOG_PATH,
                        'saved_file': SAVE_MODEL_PATH,
                        'epoch': epoch,
                        'model': args.model,
                        'train_case': args.case,
                        'train_loss_fn': args.train_loss_fn,
                        'args': vars(args)
                    }
                )
                save_dir = os.path.dirname(TRAIN_LOG_PATH)
                # Check if the directory exists, create it if it doesn't
                if save_dir and not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                torch.save(train_log, TRAIN_LOG_PATH)

        print(f"Epoch {epoch+1} / {num_epochs}: train_loss={train_loss:.4f}, val_loss={val_loss:.4f}, best_val_loss={best_val_loss:.4f}")

    print(f"Training Complete. Best validation loss: {best_val_loss:.4f}")

    # Step 4: Evaluate model
    if args.save:
        _to_load = torch.load(SAVE_MODEL_PATH)
        model.load_state_dict(_to_load['model_state_dict'])
        test_loss = evaluate_epoch(model, test_loader, eval_loss_fn, device)
        print(f"Test loss: {test_loss:.4f}")
        if log_to_wandb:
            wandb.log({'test_loss': test_loss})

    # Step 5: Save results
    os.makedirs(os.path.join(LOG_DIR, 'train_log'), exist_ok=True)
    if args.save:
        torch.save(train_log, TRAIN_LOG_PATH)


if __name__ == '__main__':
    main()
"""This package provides functions for training, evaluaiton, visualization, etc."""
__all__ = ['argument_parser', 'training', 'evaluation']
\ No newline at end of file
import argparse
import os
import json


def argument_parser():
    # A separate parser for the JSON config path; add_help=False is a must,
    # because otherwise the child parser below would have two help options.
    config_parser = argparse.ArgumentParser(
        prog='PowerFlowNet',
        description='parse json configs',
        add_help=False)
    config_parser.add_argument('--cfg_json', '--config', '--configs', default='configs/standard.json', type=str)

    parser = argparse.ArgumentParser(
        prog='PowerFlowNet',
        description='train neural network for power flow approximation',
        parents=[config_parser]
    )

    # Network Parameters
    parser.add_argument('--nfeature_dim', type=int, default=6, help='Number of node features')
    parser.add_argument('--efeature_dim', type=int, default=2, help='Number of edge features')
    parser.add_argument('--hidden_dim', type=int, default=128, help='Number of hidden features')
    parser.add_argument('--output_dim', type=int, default=6, help='Number of output features')
    parser.add_argument('--n_gnn_layers', type=int, default=4, help='Number of GNN layers')
    parser.add_argument('--K', type=int, default=3, help='Number of conv filter taps')
    parser.add_argument('--dropout_rate', type=float, default=0.2, help='Dropout rate')
    parser.add_argument('--model', type=str, default='MPN', help='Model class name')
    # NOTE: type=bool on the command line treats any non-empty string as True
    parser.add_argument('--regularize', type=bool, default=True, help='Whether the training loss\
        also penalizes the masked parts of the output')
    parser.add_argument('--regularization_coeff', type=float, default=1.0, help='Regularization coefficient')

    # Training parameters
    parser.add_argument('--data-dir', type=str, default='data', help='Path to data directory')
    parser.add_argument('--disable_normalize', default=False, action=argparse.BooleanOptionalAction, help='Disable normalizing data')
    parser.add_argument('--train_loss_fn',
                        type=str, default='masked_l2',
                        choices=['masked_l2', 'power_imbalance', 'mse_loss', 'mixed_mse_power_imbalance'],
                        help='Training loss function')
    parser.add_argument('--num-epochs', type=int, default=100, help='Number of epochs to train for')
    parser.add_argument('--batch-size', type=int, default=128, help='Batch size')
    parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate')
    parser.add_argument('--case', type=str, default='14', help='Grid case')
    parser.add_argument('--wandb', default=False, help='Enable wandb logging', action=argparse.BooleanOptionalAction)
    parser.add_argument('--wandb-entity', type=str, default='PowerFlowNet', help='wandb entity')
    parser.add_argument('--save', default=True, action=argparse.BooleanOptionalAction)

    # Step 0: Parse arguments in .json if specified
    # Step 0.1 Check if a .json file is specified
    # Step 0.2 Parse whatever is in the .json file
    args, left_argv = config_parser.parse_known_args()  # args passed BESIDES those defined in config_parser are stored in left_argv
    if args.cfg_json is not None:
        with open(args.cfg_json) as f:
            json_dict = json.load(f)
        # args.__dict__.update(json_dict) would not guarantee the arg formats
        # are correct, so the JSON entries are re-parsed through the main parser
        json_argv = []
        for key, value in json_dict.items():
            json_argv.append('--' + key)
            json_argv.append(str(value))
        parser.parse_known_args(json_argv, args)

    # Step 1: Parse arguments in command line and override .json values
    parser.parse_args(left_argv, args)  # override JSON values with command-line values
    return args
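# Hedged usage note (not part of the original module): precedence is JSON
# first, then command line. For example,
#   python3 train.py --cfg_json ./configs/standard.json --lr 0.01
# first fills the namespace from standard.json, then overrides lr with 0.01,
# because parser.parse_args(left_argv, args) runs after the JSON pass.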
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import MessagePassing


class Masked_L2_loss(nn.Module):
    """
    Custom loss function for the masked L2 loss.

    Args:
        output (torch.Tensor): The output of the neural network model.
        target (torch.Tensor): The target values.
        mask (torch.Tensor): The mask for the target values.

    Returns:
        torch.Tensor: The masked L2 loss.
    """

    def __init__(self, regularize=True, regcoeff=1):
        super(Masked_L2_loss, self).__init__()
        self.criterion = nn.MSELoss(reduction='mean')
        self.regularize = regularize
        self.regcoeff = regcoeff

    def forward(self, output, target, mask):
        masked = mask.type(torch.bool)
        outputl = torch.masked_select(output, masked)
        targetl = torch.masked_select(target, masked)
        loss = self.criterion(outputl, targetl)

        if self.regularize:
            # also penalize the complement (unmasked) entries
            masked = (1 - mask).type(torch.bool)
            output_reg = torch.masked_select(output, masked)
            target_reg = torch.masked_select(target, masked)
            loss = loss + self.regcoeff * self.criterion(output_reg, target_reg)

        return loss
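
# Hedged usage sketch (not part of the original module): shapes follow the
# training code in this repo, where predictions and targets are (N, 6) and
# the mask marks the entries to be predicted. The tensors below are dummies.
def _masked_l2_demo():
    loss_fn = Masked_L2_loss(regularize=False)
    output = torch.rand(4, 6)
    target = torch.rand(4, 6)
    mask = torch.ones(4, 6)
    mask[:, 4:] = 0  # only the first four columns contribute to the loss
    return loss_fn(output, target, mask)
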
class PowerImbalance(MessagePassing):
    """Power Imbalance Loss Class

    Arguments:
        xymean: mean of the node features
        xystd: standard deviation of the node features
        reduction: (str) 'sum' or 'mean' (node/batch-wise). P and Q are always added.

    Input:
        x: node features          -- (N, 6)
        edge_index: edge index    -- (2, num_edges)
        edge_attr: edge features  -- (num_edges, 2)
    """
    base_sn = 100       # base power in MVA
    base_voltage = 345  # base voltage in kV
    base_ohm = 1190.25  # base impedance in ohm: base_voltage**2 / base_sn

    def __init__(self, xymean, xystd, edgemean, edgestd, reduction='mean'):
        super().__init__(aggr='add', flow='target_to_source')
        if xymean.shape[0] > 1:
            xymean = xymean[0:1]
        if xystd.shape[0] > 1:
            xystd = xystd[0:1]
        self.xymean = xymean
        self.xystd = xystd
        self.edgemean = edgemean
        self.edgestd = edgestd

    def de_normalize(self, x, edge_attr):
        self.xymean = self.xymean.to(x.device)
        self.xystd = self.xystd.to(x.device)
        self.edgemean = self.edgemean.to(x.device)
        self.edgestd = self.edgestd.to(x.device)
        return x * self.xystd + self.xymean, edge_attr * self.edgestd + self.edgemean

    def is_directed(self, edge_index):
        'determine if a graph is directed by reading only one edge'
        return edge_index[0, 0] not in edge_index[1, edge_index[0, :] == edge_index[1, 0]]

    def undirect_graph(self, edge_index, edge_attr):
        """transform a directed graph (index, attr) into an undirected one by duplicating and reversing the directed edges

        Arguments:
            edge_index -- shape (2, E)
            edge_attr  -- shape (E, fe)
        """
        edge_index_dup = torch.stack(
            [edge_index[1, :], edge_index[0, :]],
            dim=0
        )  # (2, E)
        edge_index = torch.cat(
            [edge_index, edge_index_dup],
            dim=1
        )  # (2, 2*E)
        edge_attr = torch.cat(
            [edge_attr, edge_attr],
            dim=0
        )  # (2*E, fe)
        return edge_index, edge_attr

    def message(self, x_i, x_j, edge_attr):
        r"""calculate injected power $P_{ji}$

        Formula:
        $$
        P_{ji} = V_m^i V_m^j Y_{ij} \cos(V_a^i - V_a^j - \theta_{ij})
                 - (V_m^i)^2 Y_{ij} \cos(-\theta_{ij})
        $$
        $$
        Q_{ji} = V_m^i V_m^j Y_{ij} \sin(V_a^i - V_a^j - \theta_{ij})
                 - (V_m^i)^2 Y_{ij} \sin(-\theta_{ij})
        $$

        Input:
            x_i: (num_edges, 6)
            x_j: (num_edges, 6)
            edge_attr: (num_edges, 2)

        Return:
            Pji|Qji: (num_edges, 2)
        """
        r_x = edge_attr[:, 0:2]  # (num_edges, 2)
        r, x = r_x[:, 0:1], r_x[:, 1:2]
        # branch conductance/susceptance from the series impedance r + jx
        g_ij = r / (r**2 + x**2)
        b_ij = -x / (r**2 + x**2)
        ym_ij = torch.sqrt(g_ij**2 + b_ij**2)
        ya_ij = torch.acos(g_ij / ym_ij)
        vm_i = x_i[:, 0:1]                    # (num_edges, 1)
        va_i = 1/180.*torch.pi*x_i[:, 1:2]    # degrees -> radians
        vm_j = x_j[:, 0:1]
        va_j = 1/180.*torch.pi*x_j[:, 1:2]
        # rectangular voltage components
        e_i = vm_i * torch.cos(va_i)
        f_i = vm_i * torch.sin(va_i)
        e_j = vm_j * torch.cos(va_j)
        f_j = vm_j * torch.sin(va_j)
        ####### my (incomplete) method #######
        # Pji = vm_i * vm_j * ym_ij * torch.cos(va_i - va_j - ya_ij) \
        #     - vm_i**2 * ym_ij * torch.cos(-ya_ij)
        # Qji = vm_i * vm_j * ym_ij * torch.sin(va_i - va_j - ya_ij) \
        #     - vm_i**2 * ym_ij * torch.sin(-ya_ij)
        ####### standard method #######
        # cannot be done, since there is no complete information about the whole neighborhood.
        ####### another reference method #######
        # Pji = vm_i * vm_j * (g_ij*torch.cos(va_i-va_j) + b_ij*torch.sin(va_i-va_j))
        # Qji = vm_i * vm_j * (g_ij*torch.sin(va_i-va_j) - b_ij*torch.cos(va_i-va_j))
        ####### reference method 3 #######
        # Pji = g_ij*(vm_i**2 - vm_i*vm_j*torch.cos(va_i-va_j)) \
        #     - b_ij*(vm_i*vm_j*torch.sin(va_i-va_j))
        # Qji = b_ij*(- vm_i**2 + vm_i*vm_j*torch.cos(va_i-va_j)) \
        #     - g_ij*(vm_i*vm_j*torch.sin(va_i-va_j))
        ###### another one of mine (used below) ######
        Pji = g_ij*(e_i*e_j - e_i**2 + f_i*f_j - f_i**2) + b_ij*(f_i*e_j - e_i*f_j)
        Qji = g_ij*(f_i*e_j - e_i*f_j) + b_ij*(-e_i*e_j + e_i**2 - f_i*f_j + f_i**2)
        return torch.cat([Pji, Qji], dim=-1)  # (num_edges, 2)

    def update(self, aggregated, x):
        r"""calculate the power imbalance at each node

        Arguments:
            aggregated -- output of aggregation, (num_nodes, 2)
            x -- node features, (num_nodes, 6)

        Return:
            dPi|dQi: (num_nodes, 2)

        Formula (as implemented below):
        $$
        \Delta P_i = P_i - \sum_{j \in N_i} P_{ji}
        $$
        """
        # TODO check if the aggregated result is correct
        dPi = - aggregated[:, 0:1] + x[:, 2:3]  # (num_nodes, 1)
        dQi = - aggregated[:, 1:2] + x[:, 3:4]  # (num_nodes, 1)
        return torch.cat([dPi, dQi], dim=-1)    # (num_nodes, 2)

    def forward(self, x, edge_index, edge_attr):
        r"""calculate the mean squared power imbalance over all nodes

        Arguments:
            x -- node features, (N, 6)
            edge_index -- edge index, (2, num_edges)
            edge_attr -- edge features, (num_edges, 2)

        Return:
            dPQ: torch.float
        """
        if self.is_directed(edge_index):
            edge_index, edge_attr = self.undirect_graph(edge_index, edge_attr)
        x, edge_attr = self.de_normalize(x, edge_attr)
        # --- per unit ---
        # edge_attr[:, 0:2] = edge_attr[:, 0:2]/self.base_ohm
        # x[:, 2:4] = x[:, 2:4]/self.base_sn
        dPQ = self.propagate(edge_index, x=x, edge_attr=edge_attr)  # (num_nodes, 2)
        dPQ = dPQ.square().sum(dim=-1)  # (num_nodes,)
        mean_dPQ = dPQ.mean()
        return mean_dPQ


class MixedMSEPoweImbalance(nn.Module):
    """Mixed MSE and power imbalance loss:

        loss = alpha * mse_loss + (1 - alpha) * power_imbalance_loss
    """

    def __init__(self, xymean, xystd, edgemean, edgestd, alpha=0.5, reduction='mean'):
        super().__init__()
        assert 0. <= alpha <= 1.
        self.power_imbalance = PowerImbalance(xymean, xystd, edgemean, edgestd, reduction)
        self.mse_loss_fn = nn.MSELoss(reduction=reduction)
        self.alpha = alpha

    def forward(self, x, edge_index, edge_attr, y):
        power_imb_loss = self.power_imbalance(x, edge_index, edge_attr)
        mse_loss = self.mse_loss_fn(x, y)
        # the 0.020 factor rescales the imbalance term relative to the MSE term
        loss = self.alpha * mse_loss + (1-self.alpha) * 0.020 * power_imb_loss
        return loss


def main():
    # TODO import trainset, select a data.y, calculate the imbalance
    # trainset = PowerFlowData(root='~/data/volume_2/power_flow_dataset', case='14', split=[.5, .3, .2], task='train')
    # sample = trainset[3]
    # identity (de-)normalization statistics for the toy example below
    loss_fn = PowerImbalance(torch.zeros(1, 6), torch.ones(1, 6),
                             torch.zeros(1, 2), torch.ones(1, 2))
    x = torch.arange(18).reshape((3, 6)).float()
    edge_index = torch.tensor([
        [0, 1, 1, 2],
        [1, 0, 2, 1]
    ]).long()
    edge_attr = torch.tensor([
        [1, 0],
        [2, 0],
        [3, 0],
        [4, 0]
    ]).float()
    loss = loss_fn(x, edge_index, edge_attr)
    # loss = loss_fn(sample.y, sample.edge_index, sample.edge_attr)
    print(loss)


if __name__ == '__main__':
    main()
"""This module provides functions for
- evaluation_epoch - evaluate performance over a whole epoch
- other evaluation metrics function [NotImplemented]
"""
from typing import Callable, Optional, Union, Tuple
import os
import torch
from torch_geometric.loader import DataLoader
from torch.optim.optimizer import Optimizer
import torch.nn as nn
from tqdm import tqdm
from utils.custom_loss_functions import Masked_L2_loss, PowerImbalance, MixedMSEPoweImbalance
LOG_DIR = 'logs'
SAVE_DIR = 'models'
def load_model(
model: nn.Module,
run_id: str,
device: Union[str, torch.device]
) -> Tuple[nn.Module, dict]:
SAVE_MODEL_PATH = os.path.join(SAVE_DIR, 'model_'+run_id+'.pt')
if type(device) == str:
device = torch.device(device)
try:
saved = torch.load(SAVE_MODEL_PATH, map_location=device)
model.load_state_dict(saved['model_state_dict'])
except FileNotFoundError:
print("File not found. Could not load saved model.")
return -1
return model, saved
def num_params(model: nn.Module) -> int:
"""
Returns the number of trainable parameters in a neural network model.
Args:
model (nn.Module): The neural network model.
Returns:
int: The number of trainable parameters in the model.
"""
return sum(p.numel() for p in model.parameters() if p.requires_grad)
@torch.no_grad()
def evaluate_epoch(
model: nn.Module,
loader: DataLoader,
loss_fn: Callable,
device: str = 'cpu') -> float:
"""
Evaluates the performance of a trained neural network model on a dataset using the specified data loader.
Args:
model (nn.Module): The trained neural network model to be evaluated.
loader (DataLoader): The PyTorch Geometric DataLoader containing the evaluation data.
device (str): The device used for evaluating the model (default: 'cpu').
Returns:
float: The mean loss value over all the batches in the DataLoader.
"""
model.eval()
total_loss = 0.
num_samples = 0
pbar = tqdm(loader, total=len(loader), desc='Evaluating:')
for data in pbar:
data = data.to(device)
out = model(data)
if isinstance(loss_fn, Masked_L2_loss):
loss = loss_fn(out, data.y, data.x[:, 10:])
elif isinstance(loss_fn, PowerImbalance):
# have to mask out the non-predicted values, otherwise
# the network can learn to predict full-zeros
masked_out = out*data.x[:, 10:] \
+ data.x[:, 4:10]*(1-data.x[:, 10:])
loss = loss_fn(masked_out, data.edge_index, data.edge_attr)
# loss = loss_fn(data.y, data.edge_index, data.edge_attr)
elif isinstance(loss_fn, MixedMSEPoweImbalance):
loss = loss_fn(out, data.edge_index, data.edge_attr, data.y)
else:
loss = loss_fn(out, data.y)
num_samples += len(data)
total_loss += loss.item() * len(data)
mean_loss = total_loss / num_samples
return mean_loss
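
# Hedged usage sketch (not part of the original module; assumes the dataset
# and a trained model are available locally, as elsewhere in this repo):
#   testset = PowerFlowData(root='./data/', case='14', split=[.5, .2, .3], task='test')
#   loader = DataLoader(testset, batch_size=128, shuffle=False)
#   mean_loss = evaluate_epoch(model, loader, Masked_L2_loss(regularize=False), 'cpu')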
from typing import Callable, Optional, List, Tuple, Union
import os
import json

import torch
import torch.nn as nn
from torch_geometric.loader import DataLoader
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import LRScheduler
from tqdm import tqdm

from utils.custom_loss_functions import Masked_L2_loss, PowerImbalance, MixedMSEPoweImbalance


def append_to_json(log_path, run_id, result):
    log_entry = {str(run_id): result}
    os.makedirs(os.path.dirname(log_path), exist_ok=True)
    try:
        with open(log_path, "r") as json_file:
            exist_log = json.load(json_file)
    except FileNotFoundError:
        exist_log = {}
    with open(log_path, "w") as json_file:
        exist_log.update(log_entry)
        json.dump(exist_log, json_file, indent=4)


def train_epoch(
        model: nn.Module,
        loader: DataLoader,
        loss_fn: Callable,
        optimizer: Optimizer,
        device: torch.device) -> float:
    """
    Trains a neural network model for one epoch using the specified data loader and optimizer.

    Args:
        model (nn.Module): The neural network model to be trained.
        loader (DataLoader): The PyTorch Geometric DataLoader containing the training data.
        loss_fn (Callable): The loss function used for training.
        optimizer (torch.optim.Optimizer): The PyTorch optimizer used for training the model.
        device (torch.device): The device used for training the model.

    Returns:
        float: The mean loss value over all the batches in the DataLoader.
    """
    model = model.to(device)
    total_loss = 0.
    num_samples = 0
    model.train()
    pbar = tqdm(loader, total=len(loader), desc='Training')
    for data in pbar:
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data)  # (N, 6); only the first four columns matter
        # data.y.shape == (N, 6)
        if isinstance(loss_fn, Masked_L2_loss):
            loss = loss_fn(out, data.y, data.x[:, 10:])
        elif isinstance(loss_fn, PowerImbalance):
            # the non-predicted values have to be masked out, otherwise
            # the network can learn to predict all-zeros
            masked_out = out*data.x[:, 10:] \
                + data.x[:, 4:10]*(1-data.x[:, 10:])
            loss = loss_fn(masked_out, data.edge_index, data.edge_attr)
        elif isinstance(loss_fn, MixedMSEPoweImbalance):
            loss = loss_fn(out, data.edge_index, data.edge_attr, data.y)
        else:
            loss = loss_fn(out, data.y)
        loss.backward()
        optimizer.step()
        num_samples += len(data)
        total_loss += loss.item() * len(data)

    mean_loss = total_loss / num_samples
    return mean_loss


def main():
    log_path = 'logs/save_logs.json'
    run_id = 'arb_id_01'
    result = {
        'train_loss': 0.3,
        'val_loss': 0.2,
    }
    append_to_json(log_path, run_id, result)


if __name__ == '__main__':
    main()