Commit 7b5e024d authored by zhangwq5's avatar zhangwq5

all

parent 33c0366d
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Numpy datasets
data/
# VSCode
.DS_Store
.vscode/
# Saved models and logs
logs/
models/
results/
wandb/
outtest.txt
runs.bat
MIT License
Copyright (c) 2023 Stavros Orfanoudakis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
# PowerFlowNet_pytorch
## Paper
`PowerFlowNet: Power flow approximation using message passing Graph Neural Networks`
- https://www.sciencedirect.com/science/article/pii/S0142061524003338
Leveraging Message Passing GNNs for High-Quality Power Flow Approximation.
## Model Architecture
PowerFlowNet performs high-quality power flow approximation with message passing graph neural networks.
Compared with existing PF GNN approaches, PowerFlowNet is distinctive in that it combines the capabilities of message passing GNNs and high-order GCNs in a scheme called PowerFlowConv, which processes a trainable masked embedding of the network graph. This design makes PowerFlowNet highly scalable and provides an effective solution to the PF problem.
<div align=center>
<img src="./doc/arch_PF.png"/>
</div>
## Algorithm
PowerFlowNet casts power flow as a graph neural network node regression problem: each bus is represented as a node and each transmission line as an edge, preserving the connectivity of the network.
<div align=center>
<img src="./doc/arch_PF1.png"/>
</div>
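To make the node regression formulation concrete, here is a minimal sketch (with toy placeholder values, not the repository's actual feature layout) of a small grid represented as a `torch_geometric` graph:
```python
import torch
from torch_geometric.data import Data

# three buses, each with illustrative input features (type, Vm, Va, Pd, Qd)
x = torch.tensor([[3.0, 1.02, 0.0, 0.0, 0.0],
                  [2.0, 1.00, 0.0, 0.3, 0.1],
                  [1.0, 1.00, 0.0, 0.5, 0.2]])
# two transmission lines (bus0-bus1, bus1-bus2), stored in both directions
edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])
# per-line features, e.g. resistance r and reactance x in p.u.
edge_attr = torch.tensor([[0.01, 0.10],
                          [0.01, 0.10],
                          [0.02, 0.20],
                          [0.02, 0.20]])
# node regression targets, e.g. (Vm, Va, P, Q) per bus
y = torch.zeros(3, 4)

data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)
print(data)  # Data(x=[3, 5], edge_index=[2, 4], edge_attr=[4, 2], y=[3, 4])
```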
## Environment Setup
### Hardware Requirements
DCU model: K100_AI; number of nodes: 1; number of cards: 1.
### Docker (Method 1)
```bash
docker pull image.sourcefind.cn:5000/dcu/admin/base/pytorch:2.4.1-ubuntu22.04-dtk25.04.1-py3.10
docker run -it --name {docker_name} --device=/dev/kfd --privileged --network=host --device=/dev/dri --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -v /path/your_code_data/:/path/your_code_data/ -v /opt/hyhal:/opt/hyhal:ro --group-add video --shm-size 64G image.sourcefind.cn:5000/dcu/admin/base/pytorch:2.4.1-ubuntu22.04-dtk25.04.1-py3.10 bash
cd /your_code_path/powerflownet_pytorch
pip install -r requirements.txt
```
### Dockerfile (Method 2)
Build and use the provided Dockerfile as follows:
```bash
cd docker
docker build --no-cache -t powerflownet:latest .
docker run -it --name {docker_name} --device=/dev/kfd --privileged --network=host --device=/dev/dri --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -v /path/your_code_data/:/path/your_code_data/ -v /opt/hyhal:/opt/hyhal:ro --group-add video --shm-size 64G {imageID} bash
cd /your_code_path/powerflownet_pytorch
pip install -r requirements.txt
```
### Anaconda (Method 3)
The DCU-specific deep learning libraries required by this project can be downloaded from the [光合](https://developer.sourcefind.cn/tool/) developer community.
```bash
DTK: 25.04
python: 3.10
torch: 2.4.1+das.opt1.dtk25041
```
`Tip: the DTK driver, torch, and the other DCU-related tool versions above must correspond to each other exactly.`
The remaining (non-deep-learning) dependencies can be installed as follows:
```bash
pip install -r requirements.txt
```
## Dataset
Download the archive and extract it into the ./powerflownet_pytorch/data/raw/ directory:
- https://surfdrive.surf.nl/files/index.php/s/Qw4RHLvI2RPBIBL
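After extraction, the data can be loaded through the repository's `PowerFlowData` dataset class (see `datasets/PowerFlowData.py`). A minimal sketch, assuming the extracted files live under ./data/raw/ and using the 50/20/30 split that appears elsewhere in this codebase:
```python
from datasets.PowerFlowData import PowerFlowData

# root must contain the extracted raw/ folder, e.g. ./data/raw/case118v2_*.npy
trainset = PowerFlowData(root='./data', case='118v2', split=[.5, .2, .3], task='train')
print(len(trainset))
print(trainset[0])
```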
## Training
```bash
python3 train.py --cfg_json ./configs/standard.json\
--num-epochs 10\
--data-dir ./data/\
--batch-size 128\
--train_loss_fn mse_loss\
--lr 0.001\
--case 118v2\
--model MaskEmbdMultiMPN\
--save
```
## Inference
Not available yet.
## Results
See the ./res directory for details.
### Accuracy
Training PowerFlowNet on a DCU (K100_AI) yields the same accuracy as on a GPU (A800); framework: PyTorch.
## Application Scenarios
### Algorithm Category
`Graph Neural Networks`
### Key Application Industries
`Manufacturing, Power, Education`
## Pretrained Weights
- [PowerFlowNet](https://surfdrive.surf.nl/files/index.php/s/iunfVTGsABT5NaD)
## Source Repository & Issue Feedback
- https://developer.sourcefind.cn/codes/modelzoo/powerflownet_pytorch
## References
- https://github.com/StavrosOrf/PoweFlowNet
import torch
from datasets.PowerFlowData import PowerFlowData
from utils.custom_loss_functions import PowerImbalance
def main():
# Load the datasets and compute the mean PowerImbalance loss over the validation set
trainset = PowerFlowData(root='data', case='118mini', split=[.5, .3, .2], task='train',
normalize=True)
valset = PowerFlowData(root='~/data/volume_2/power_flow_dataset', case='118v2', split=[.5, .3, .2], task='val', normalize=True)
# sample = trainset[3]
# # loss_fn = PowerImbalance(trainset.xymean, trainset.xystd)
# loss_fn = PowerImbalance(*trainset.get_data_means_stds())
# x = torch.arange(18).reshape((3, 6)).float()
# edge_index = torch.tensor([
# [0, 1, 1, 2],
# [1, 0, 2, 1]
# ]).long()
# edge_attr = torch.tensor([
# [1, 0],
# [2, 0],
# [3, 0],
# [4, 0]
# ]).float()
# # loss = loss_fn(x, edge_index, edge_attr)
# sample.y = sample.y[:, :]
# loss = loss_fn(sample.y, sample.edge_index, sample.edge_attr)
# print(loss)
loss_fn = PowerImbalance(*valset.get_data_means_stds())
val_loss = 0.
for i in range(len(valset)):
val_loss += loss_fn(valset[i].y, valset[i].edge_index, valset[i].edge_attr)
val_loss /= len(valset)
with open('val_loss.txt', 'w') as f:
f.write(str(val_loss.item()))  # write the scalar loss value
if __name__ == '__main__':
main()
# import cvxopt as cvxopt
import cvxpy as cp
import numpy as np
import os
from torch_geometric.loader import DataLoader
from datasets.PowerFlowData import PowerFlowData
from utils.argument_parser import argument_parser
from utils.custom_loss_functions import Masked_L2_loss
from pygsp import graphs
import torch
def collaborative_filtering_testing(y, mask, B, x_gt, f, eval_loss_fn=Masked_L2_loss(regularize=False)):
# decision variables
z_hat = cp.Variable((x_gt.shape[0], x_gt.shape[1]))
distance = 1/2 * \
cp.square(cp.pnorm(cp.multiply(y, mask)-cp.multiply(z_hat, mask), f))
normalizer = cp.square(cp.pnorm(z_hat, f))
# trace = cp.trace(cp.matmul(cp.multiply(z_hat,mask).T, cp.matmul(L, cp.multiply(z_hat,mask))))
# trace = cp.trace(cp.matmul(z_hat.T, cp.matmul(L,z_hat)))
trace = cp.norm(B@z_hat, 2)
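# Putting the three terms together, each problem constructed below minimizes
#   (1/2) * || mask ∘ (y - z_hat) ||_f^2  +  lambda_z * ||z_hat||_f^2  +  lambda_L * ||B @ z_hat||_2
# where f is the p-norm order passed in and B is the graph's incidence
# (differential) operator, so the last term penalizes variation along edges.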
# lambda_z = 0.0001
# prob = cp.Problem(cp.Minimize(1/2 * error + lambda_z*cp.norm(z_hat,1)))
losses = []
rnmse_list = []
# alpha candidates (currently unused; the grid search below runs over lambda_L and lambda_z)
alphas = [0.1, 0.5, 1, 10]
lambda_L_list = np.arange(0, 3, 0.5)
lambda_z_list = np.arange(0, 3, 0.5)
results = np.zeros((len(lambda_L_list), len(lambda_z_list)))
for i, lambda_L in enumerate(lambda_L_list):
for j, lambda_z in enumerate(lambda_z_list):
prob = cp.Problem(cp.Minimize(
distance + lambda_z*normalizer + lambda_L*trace))
print("problem is solved...")
prob.solve()
print("status:", prob.status)
# rnmse = np.sqrt(np.square(z_hat.value-x_gt).mean())/y.std()
# rnmse_list.append(rnmse)
# print(f"The rNMSE is: {np.round(rnmse,4)}")
# print("z_hat: ", z_hat.value*mask.numpy())
# print("x_gt: ", x_gt*mask.numpy())
# print("mask: ", mask)
z_tensor = torch.tensor(z_hat.value)
x_gt_tensor = torch.tensor(x_gt)
loss = eval_loss_fn(z_tensor, x_gt_tensor, mask)
# print("loss: ", loss.item())
# losses.append(loss.item())
results[i, j] = loss.item()
# plot a 2d heatmap of the results
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme()
ax = sns.heatmap(results, annot=True, fmt=".2f", cmap="YlGnBu")
ax.set_xlabel("lambda_z")
ax.set_ylabel("lambda_L")
plt.show()
def tikhonov_regularizer(alpha, L, y, mask):
# Tikhonov regularization
z_hat = np.matmul(np.matmul(np.linalg.inv(alpha*L + np.eye(L.shape[0])), L), y)
return z_hat
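# Note: the textbook graph Tikhonov smoother solves
#   z = argmin_z ||z - y||^2 + alpha * z^T L z,  giving  z = (I + alpha*L)^{-1} y;
# the expression above additionally multiplies by L, i.e. it returns
#   (alpha*L + I)^{-1} @ L @ y.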
if __name__ == "__main__":
data_dir = "./data/"
grid_case = "5"
grid_case = "14"
# grid_case = "9"
# grid_case = "6470rte"
# grid_case = "118"
# Load the dataset
trainset = PowerFlowData(root=data_dir, case=grid_case,
split=[.5, .2, .3], task='train')
valset = PowerFlowData(root=data_dir, case=grid_case,
split=[.5, .2, .3], task='val')
testset = PowerFlowData(root=data_dir, case=grid_case,
split=[.5, .2, .3], task='test')
# train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
# val_loader = DataLoader(valset, batch_size=batch_size, shuffle=False)
# test_loader = DataLoader(testset, batch_size=batch_size, shuffle=False)
# Load adjacency matrix from file
file_path = data_dir + "/raw/case" + str(grid_case) + '_adjacency_matrix.npy'
adjacency_matrix = np.load(file_path)
print(adjacency_matrix.shape)
num_of_nodes = adjacency_matrix.shape[0]
print(f'Number of nodes: {num_of_nodes}')
# create graph from adjacency matrix
G = graphs.Graph(adjacency_matrix)
# get incidence matrix
G.compute_differential_operator()
B = G.D.toarray()
print(f'B: {B.shape}')
# get laplacian matrix
L = G.L.toarray()
print(f'Laplacian: {L.shape}')
# Get the data
# x_gt is the actual values
x_gt = trainset.y[:num_of_nodes, :4].numpy()
print("x_gt: ", x_gt.shape, x_gt[0, :])
# y is the observations, i.e. the features with missing values
y = trainset.x[:num_of_nodes, 4:8]
print("y: ", y.shape, y[0, :])
# mask of values to be predicted
mask = trainset.x[:num_of_nodes, 10:14]
# find values x from ys
# x_hat is the predicted values of the features with missing values
# Create the problem
rnmse_list = []
print("problem is constructed...")
f = x_gt.shape[1]
print("f: ", f)
collaborative_filtering_testing(y, mask, B, x_gt, f)
eval_loss_fn = Masked_L2_loss(regularize=False)
result = tikhonov_regularizer(1.25, L, y, mask)
loss = eval_loss_fn(torch.tensor(result), torch.tensor(x_gt), mask)
print("loss: ", loss.item())
{
"nfeature_dim": 6,
"efeature_dim": 5,
"output_dim": 6,
"hidden_dim": 512,
"n_gnn_layers": 10,
"K": 3,
"dropout_rate": 0.2
}
{
"nfeature_dim": 6,
"efeature_dim": 5,
"output_dim": 6,
"hidden_dim": 512,
"n_gnn_layers": 5,
"K": 3,
"dropout_rate": 0.2
}
{
"nfeature_dim": 6,
"efeature_dim": 5,
"output_dim": 6,
"hidden_dim": 64,
"n_gnn_layers": 2,
"K": 3,
"dropout_rate": 0.2
}
{
"nfeature_dim": 6,
"efeature_dim": 5,
"output_dim": 6,
"hidden_dim": 129,
"n_gnn_layers": 4,
"K": 3,
"dropout_rate": 0.2
}
# Example script: generate power flow training samples using pypower.
from pygsp import graphs, plotting
import pypower.api as pp
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
def print_Bus_data(case, case2):
print("\n======================================")
print("Bus Data")
print("======================================")
print("%7s %8s %7s %7s %7s %7s %7s %7s %7s %7s %7s %7s %7s" % ('bus', 'type',
'Pd', 'Qd', 'Gs', 'Bs', 'area', 'Vm', 'Va', 'baseKV', 'zone', 'maxVm', 'minVm'))
print("======================================"*3)
for i in range(n):
print("%7d %8d %7.1f %7.1f %7.1f %7.1f %7d %7.3f %7.3f %7.1f %7d %7.3f %7.3f" % (case['bus'][i, 0], case['bus'][i, 1], case['bus'][i, 2], case['bus'][i, 3], case['bus'][
i, 4], case['bus'][i, 5], case['bus'][i, 6], case['bus'][i, 7], case['bus'][i, 8], case['bus'][i, 9], case['bus'][i, 10], case['bus'][i, 11], case['bus'][i, 12]))
print("======================================"*3)
for i in range(n):
print("%7d %8d %7.1f %7.1f %7.1f %7.1f %7d %7.3f %7.3f %7.1f %7d %7.3f %7.3f" % (case2['bus'][i, 0], case2['bus'][i, 1], case2['bus'][i, 2], case2['bus'][i, 3], case2['bus'][
i, 4], case2['bus'][i, 5], case2['bus'][i, 6], case2['bus'][i, 7], case2['bus'][i, 8], case2['bus'][i, 9], case2['bus'][i, 10], case2['bus'][i, 11], case2['bus'][i, 12]))
def print_Gen_data(case, case2):
# print generator data with column names
print("\n======================================")
print("Generator Data")
print("======================================")
print("%7s %7s %7s %7s %7s %7s %7s %7s %7s %7s" % ('bus', 'Pg',
'Qg', 'Qmax', 'Qmin', 'Vg', 'mBase', 'status', 'Pmax', 'Pmin'))
print("======================================"*3)
for i in range(case['gen'].shape[0]):
print("%7d %7.1f %7.1f %7.1f %7.1f %7.3f %7.1f %7d %7.1f %7.1f" % (case['gen'][i, 0], case['gen'][i, 1], case['gen'][i, 2], case[
'gen'][i, 3], case['gen'][i, 4], case['gen'][i, 5], case['gen'][i, 6], case['gen'][i, 7], case['gen'][i, 8], case['gen'][i, 9]))
print("======================================"*3)
for i in range(case2['gen'].shape[0]):
print("%7d %7.1f %7.1f %7.1f %7.1f %7.3f %7.1f %7d %7.1f %7.1f" % (case2['gen'][i, 0], case2['gen'][i, 1], case2['gen'][i, 2], case2[
'gen'][i, 3], case2['gen'][i, 4], case2['gen'][i, 5], case2['gen'][i, 6], case2['gen'][i, 7], case2['gen'][i, 8], case2['gen'][i, 9]))
def print_Branch_data(case, case2):
# print branch data with column names
#
print("\n======================================")
print("Branch Data")
print("======================================")
print("%7s %7s %7s %7s %7s %7s %7s %7s %7s %7s" % ('fbus', 'tbus',
'r', 'x', 'b', 'rateA', 'rateB', 'rateC', 'ratio', 'angle'))
print("======================================"*3)
for i in range(case['branch'].shape[0]):
print("%7d %7d %7.3f %7.3f %7.5f %7.1f %7.1f %7.1f %7.3f %7.3f" % (case['branch'][i, 0], case['branch'][i, 1], case['branch'][i, 2], case['branch']
[i, 3], case['branch'][i, 4], case['branch'][i, 5], case['branch'][i, 6], case['branch'][i, 7], case['branch'][i, 8], case['branch'][i, 9]))
print("======================================"*3)
for i in range(case2['branch'].shape[0]):
print("%7d %7d %7.3f %7.3f %7.5f %7.1f %7.1f %7.1f %7.3f %7.3f" % (case2['branch'][i, 0], case2['branch'][i, 1], case2['branch'][i, 2], case2['branch'][
i, 3], case2['branch'][i, 4], case2['branch'][i, 5], case2['branch'][i, 6], case2['branch'][i, 7], case2['branch'][i, 8], case2['branch'][i, 9]))
def get_Admittance_matrices(case):
# Get admittance matrices using pypower
ppc = pp.ext2int(case)
baseMVA, bus, gen, branch = ppc["baseMVA"], ppc["bus"], ppc["gen"], ppc["branch"]
Ybus, Yf, Yt = pp.makeYbus(baseMVA, bus, branch)
#Builds the vector of complex bus power injections.
Sbus = pp.makeSbus(baseMVA, bus, gen)
print(Ybus.shape)
print(Yf.shape)
print(Yt.shape)
return Ybus, Yf, Yt, Sbus
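# Example usage (hypothetical; the function is not called in this script):
#   Ybus, Yf, Yt, Sbus = get_Admittance_matrices(pp.case14())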
# Dataset generation parameters
number_of_samples = 100000
#Tested case files: case4gs, case14, case30, case118
test_case = 'case14'
base_case = pp.case14()
PD_factor = 1
# Get Adjacency Matrix
bus_names = base_case['bus'][:, 0].tolist()
n = base_case['bus'].shape[0]
A = np.zeros((n, n))
for edge in base_case['branch']:
edge_1 = bus_names.index(edge[0])
edge_2 = bus_names.index(edge[1])
A[edge_1, edge_2] = 1
A[edge_2, edge_1] = 1
edge_features_list = []
node_features_x_list = []
node_features_y_list = []
graph_feature_list = []
while True:
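# NOTE: base_case is reused (and mutated in place) across iterations; the v2
# generator later in this commit instead rebuilds the case each loop via create_base_case().copy().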
case = base_case
# Get random values for the parameters
r = case['branch'][:, 2]
x = case['branch'][:, 3]
b = case['branch'][:, 4]
tau = case['branch'][:, 8] # ratio
Pmax = case['gen'][:, 8]
Pmin = case['gen'][:, 9]
Pd = case['bus'][:, 2]
r = np.random.uniform(0.8*r, 1.2*r, case['branch'].shape[0])
x = np.random.uniform(0.8*x, 1.2*x, case['branch'].shape[0])
b = np.random.uniform(0.1*b, 2.0*b, case['branch'].shape[0])
# tau = np.random.uniform(0.8*tau, 1.2*tau, case['branch'].shape[0]) # NOTE shouldn't change this. but does it make a difference?
# angle = np.random.uniform(-0.2, 0.2, case['branch'].shape[0]) # NOTE should theoretically not matter
vg = np.random.uniform(0.95, 1.05, case['gen'].shape[0])
Pg = np.random.uniform(0.25*Pmax, 0.75*Pmax, case['gen'].shape[0])
Pd = np.random.uniform(0.5*Pd, 1.5*Pd, case['bus'].shape[0])
Qd = np.random.uniform(0.5*Pd, 1.5*Pd, case['bus'].shape[0])
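# NOTE: Qd is drawn from bounds based on Pd (not the original Qd), tying the
# sampled reactive demand to the active demand range.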
# case['branch'][:, 2] = r
# case['branch'][:, 3] = x
case['branch'][:, 4] = b
# case['branch'][:, 8] = tau
# case['branch'][:, 9] = angle
case['branch'][:, 8] = 0.
case['branch'][:, 9] = 0.
# # print(vg)
# case['gen'][:, 5] = vg
# # print(case['gen'][:, 5])
# # print(Pg)
# case['gen'][:, 1] = Pg
# # print(case['gen'][:, 1])
# case['bus'][:, 2] = Pd * PD_factor
# case['bus'][:, 3] = Qd
# print_Bus_data(base_case, case)
# print_Gen_data(base_case, case)
# print_Branch_data(base_case, case)
ppopt = pp.ppoption()
ppopt["PF_MAX_IT"] = 10
ppopt['VERBOSE'] = True
x = pp.runpf(case, ppopt=ppopt)
if x[1] == 0:
print(f'Failed to converge, current sample number: {len(edge_features_list)}')
continue
# Graph feature
baseMVA = x[0]['baseMVA']
# Create a vector of branch features including start and end nodes, r, x, b, tau, angle
edge_features = np.zeros((case['branch'].shape[0], 7))
edge_features[:, 0] = case['branch'][:, 0]
edge_features[:, 1] = case['branch'][:, 1]
edge_features[:, 2] = case['branch'][:, 2]
edge_features[:, 3] = case['branch'][:, 3]
edge_features[:, 4] = case['branch'][:, 4]
edge_features[:, 5] = case['branch'][:, 8]
edge_features[:, 6] = case['branch'][:, 9]
# Create a vector of node features including index, type, Vm, Va, Pd, Qd, Gs, Bs, Pg
case['bus'] = x[0]['bus']
node_features_x = np.zeros((case['bus'].shape[0], 9))
node_features_x[:, 0] = case['bus'][:, 0] # index
node_features_x[:, 1] = case['bus'][:, 1] # type
# Va ----This changes for every bus except slack bus
node_features_x[:, 3] = np.zeros(case['bus'].shape[0])
node_features_x[:, 4] = case['bus'][:, 2] # Pd
node_features_x[:, 5] = case['bus'][:, 3] # Qd
node_features_x[:, 6] = case['bus'][:, 4] # Gs
node_features_x[:, 7] = case['bus'][:, 5] # Bs
# Vm is 1 if type is not "generator" else it is case['gen'][:,j]
vm = np.ones(case['bus'].shape[0])
for j in range(case['gen'].shape[0]):
# find index of case['gen'][j,0] in case['bus'][:,0]
index = np.where(case['bus'][:, 0] == case['gen'][j, 0])[0][0]
vm[index] = case['gen'][j, 5] # Vm = Vg
node_features_x[index, 8] = case['gen'][j, 1] # Pg
node_features_x[:, 2] = vm # Vm
# Create a vector of node features including index, type, Vm, Va, Pd, Qd, Gs, Bs
case['bus'] = x[0]['bus']
node_features_y = np.zeros((case['bus'].shape[0], 8))
node_features_y[:, 0] = case['bus'][:, 0] # index
node_features_y[:, 1] = case['bus'][:, 1] # type
# Vm ----This changes for Load Buses
node_features_y[:, 2] = case['bus'][:, 7]
# Va ----This changes for every bus except slack bus
node_features_y[:, 3] = case['bus'][:, 8]
node_features_y[:, 4] = case['bus'][:, 2] # Pd
node_features_y[:, 5] = case['bus'][:, 3] # Qd
node_features_y[:, 6] = case['bus'][:, 4] # Gs
node_features_y[:, 7] = case['bus'][:, 5] # Bs
edge_features_list.append(edge_features)
node_features_x_list.append(node_features_x)
node_features_y_list.append(node_features_y)
graph_feature_list.append(baseMVA)
if len(edge_features_list) == number_of_samples:
break
# Turn the lists into numpy arrays
edge_features = np.array(edge_features_list)
node_features_x = np.array(node_features_x_list)
node_features_y = np.array(node_features_y_list)
graph_features = np.array(graph_feature_list)
# Print the shapes
print(f'Adjacency matrix shape: {A.shape}')
print(f'edge_features shape: {edge_features.shape}')
print(f'node_features_x shape: {node_features_x.shape}')
print(f'node_features_y shape: {node_features_y.shape}')
print(f'graph_features shape: {graph_features.shape}')
# save the features
with open("./data/"+test_case+"_edge_features.npy", 'wb') as f:
np.save(f, edge_features)
with open("./data/"+test_case+"_node_features_x.npy", 'wb') as f:
np.save(f, node_features_x)
with open("./data/"+test_case+"_node_features_y.npy", 'wb') as f:
np.save(f, node_features_y)
with open("./data/"+test_case+"_graph_features.npy", 'wb') as f:
np.save(f, graph_features)
with open("./data/"+test_case+"_adjacency_matrix.npy", 'wb') as f:
np.save(f, A)
# Example script (v2): generate power flow training samples using pypower, with randomized generator placement.
import random
from pygsp import graphs, plotting
import pypower.api as pp
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
import networkx
def print_Bus_data(case, case2):
print("\n======================================")
print("Bus Data")
print("======================================")
print("%7s %8s %7s %7s %7s %7s %7s %7s %7s %7s %7s %7s %7s" % ('bus', 'type',
'Pd', 'Qd', 'Gs', 'Bs', 'area', 'Vm', 'Va', 'baseKV', 'zone', 'maxVm', 'minVm'))
print("======================================"*3)
for i in range(n):
print("%7d %8d %7.1f %7.1f %7.1f %7.1f %7d %7.3f %7.3f %7.1f %7d %7.3f %7.3f" % (case['bus'][i, 0], case['bus'][i, 1], case['bus'][i, 2], case['bus'][i, 3], case['bus'][
i, 4], case['bus'][i, 5], case['bus'][i, 6], case['bus'][i, 7], case['bus'][i, 8], case['bus'][i, 9], case['bus'][i, 10], case['bus'][i, 11], case['bus'][i, 12]))
print("======================================"*3)
for i in range(n):
print("%7d %8d %7.1f %7.1f %7.1f %7.1f %7d %7.3f %7.3f %7.1f %7d %7.3f %7.3f" % (case2['bus'][i, 0], case2['bus'][i, 1], case2['bus'][i, 2], case2['bus'][i, 3], case2['bus'][
i, 4], case2['bus'][i, 5], case2['bus'][i, 6], case2['bus'][i, 7], case2['bus'][i, 8], case2['bus'][i, 9], case2['bus'][i, 10], case2['bus'][i, 11], case2['bus'][i, 12]))
def print_Gen_data(case, case2):
# print generator data with column names
print("\n======================================")
print("Generator Data")
print("======================================")
print("%7s %7s %7s %7s %7s %7s %7s %7s %7s %7s" % ('bus', 'Pg',
'Qg', 'Qmax', 'Qmin', 'Vg', 'mBase', 'status', 'Pmax', 'Pmin'))
print("======================================"*3)
for i in range(case['gen'].shape[0]):
print("%7d %7.1f %7.1f %7.1f %7.1f %7.3f %7.1f %7d %7.1f %7.1f" % (case['gen'][i, 0], case['gen'][i, 1], case['gen'][i, 2], case[
'gen'][i, 3], case['gen'][i, 4], case['gen'][i, 5], case['gen'][i, 6], case['gen'][i, 7], case['gen'][i, 8], case['gen'][i, 9]))
print("======================================"*3)
for i in range(case2['gen'].shape[0]):
print("%7d %7.1f %7.1f %7.1f %7.1f %7.3f %7.1f %7d %7.1f %7.1f" % (case2['gen'][i, 0], case2['gen'][i, 1], case2['gen'][i, 2], case2[
'gen'][i, 3], case2['gen'][i, 4], case2['gen'][i, 5], case2['gen'][i, 6], case2['gen'][i, 7], case2['gen'][i, 8], case2['gen'][i, 9]))
def print_Branch_data(case, case2):
# print branch data with column names
#
print("\n======================================")
print("Branch Data")
print("======================================")
print("%7s %7s %7s %7s %7s %7s %7s %7s %7s %7s" % ('fbus', 'tbus',
'r', 'x', 'b', 'rateA', 'rateB', 'rateC', 'ratio', 'angle'))
print("======================================"*3)
for i in range(case['branch'].shape[0]):
print("%7d %7d %7.3f %7.3f %7.5f %7.1f %7.1f %7.1f %7.3f %7.3f" % (case['branch'][i, 0], case['branch'][i, 1], case['branch'][i, 2], case['branch']
[i, 3], case['branch'][i, 4], case['branch'][i, 5], case['branch'][i, 6], case['branch'][i, 7], case['branch'][i, 8], case['branch'][i, 9]))
print("======================================"*3)
for i in range(case2['branch'].shape[0]):
print("%7d %7d %7.3f %7.3f %7.5f %7.1f %7.1f %7.1f %7.3f %7.3f" % (case2['branch'][i, 0], case2['branch'][i, 1], case2['branch'][i, 2], case2['branch'][
i, 3], case2['branch'][i, 4], case2['branch'][i, 5], case2['branch'][i, 6], case2['branch'][i, 7], case2['branch'][i, 8], case2['branch'][i, 9]))
def get_Admittance_matrices(case):
# Get admittance matrices using pypower
ppc = pp.ext2int(case)
baseMVA, bus, gen, branch = ppc["baseMVA"], ppc["bus"], ppc["gen"], ppc["branch"]
Ybus, Yf, Yt = pp.makeYbus(baseMVA, bus, branch)
#Builds the vector of complex bus power injections.
Sbus = pp.makeSbus(baseMVA, bus, gen)
print(Ybus.shape)
print(Yf.shape)
print(Yt.shape)
return Ybus, Yf, Yt, Sbus
# Dataset generation parameters
number_of_samples = 10000
#Tested case files: case4gs, case14, case30, case118;; case14v2
test_case = 'case14v2'
base_case = pp.case14()
create_base_case = pp.case14
PD_factor = 1
# --- DEBUG ---
graph = networkx.from_edgelist(base_case['branch'][:,:2].tolist())
networkx.draw(graph, with_labels=True)
# --- DEBUG ---
# Get Adjacency Matrix
# -- Can use networkx function
bus_names = base_case['bus'][:, 0].tolist()
n = base_case['bus'].shape[0]
A = np.zeros((n, n))
for edge in base_case['branch']:
edge_1 = bus_names.index(edge[0])
edge_2 = bus_names.index(edge[1])
A[edge_1, edge_2] = 1
A[edge_2, edge_1] = 1
edge_features_list = []
node_features_x_list = []
node_features_y_list = []
graph_feature_list = []
while True:
case = create_base_case().copy()
# Step 1: Randomized generator node
ori_num_gen = case['gen'].shape[0]
delta_num_gen = np.random.randint(-ori_num_gen // 7, ori_num_gen // 7) # ~14%
# delta_num_gen = 2
if delta_num_gen < 0:
case['gen'] = case['gen'][:delta_num_gen,:]
case['gencost'] = case['gencost'][:delta_num_gen,:]
if delta_num_gen > 0:
case['gen'] = np.concatenate([case['gen'], case['gen'][:delta_num_gen,:]], axis=0) # rule: copy the first few
case['gencost'] = np.concatenate([case['gencost'], case['gencost'][:delta_num_gen,:]], axis=0) # rule: copy the first few
all_nodes = list(range(1, case['bus'].shape[0]+1)) # starting from 1
gen_nodes = list(case['gen'][:,0].astype(int)) # ref node is included in case['gen']
# ref_nodes = list(case['bus'][case['bus'][:,1]==3,0].astype(int))
# load_nodes = [node_idx for node_idx in all_nodes if node_idx not in gen_nodes and node_idx not in ref_nodes]
load_nodes = [node_idx for node_idx in all_nodes if node_idx not in gen_nodes]
new_gen_nodes = random.sample(load_nodes, 2)
case['gen'][-delta_num_gen:,0] = new_gen_nodes
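# NOTE: random.sample above draws exactly 2 nodes, so this assignment only
# lines up with the slice when delta_num_gen == 2.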
# Step 2: Get original values for the case file
r = case['branch'][:, 2]
x = case['branch'][:, 3]
b = case['branch'][:, 4]
tau = case['branch'][:, 8] # ratio
Pmax = case['gen'][:, 8]
Pmin = case['gen'][:, 9]
Pd = case['bus'][:, 2]
# Step 3: Randomize input data
# -- Step 3.1: branch data
r = np.random.uniform(0.8*r, 1.2*r, case['branch'].shape[0])
x = np.random.uniform(0.8*x, 1.2*x, case['branch'].shape[0])
b = np.zeros(case['branch'].shape[0])
tau = np.zeros(case['branch'].shape[0])
angle = np.zeros(case['branch'].shape[0])
case['branch'][:, 2] = r
case['branch'][:, 3] = x
case['branch'][:, 4] = b
case['branch'][:, 8] = tau
case['branch'][:, 9] = angle
# -- Step 3.2: bus data
Vg = np.random.uniform(0.95, 1.05, case['gen'].shape[0])
Pg = np.random.uniform(0.25*Pmax, 1.25*Pmax, case['gen'].shape[0])
Pd = np.random.uniform(0.5*Pd, 1.5*Pd, case['bus'].shape[0])
Qd = np.random.uniform(0.5*Pd, 1.5*Pd, case['bus'].shape[0])
Gs = np.zeros(case['bus'].shape[0])
Bs = np.zeros(case['bus'].shape[0])
# power balance adjust
# only active, reactive Qg is unknown
sum_Pg = np.sum(Pg)
sum_Pd = np.sum(Pd)
Pd = Pd/sum_Pd * sum_Pg * np.random.uniform(0.90, 0.97) # total Pd = total Pg - transmission loss
# The loss fraction is a rough heuristic: the implied MW line losses are not fully realistic, but physically admissible.
# generator bus
case['gen'][:, 5] = Vg
case['gen'][:, 1] = Pg
# all bus
for bus in case['bus'][:, 0]:
if case['bus'][int(bus)-1, 1] == 3:
pass
elif bus in case['gen'][:, 0]:
case['bus'][int(bus)-1, 1] = 2
else:
case['bus'][int(bus)-1, 1] = 1
case['bus'][:, 2] = Pd * PD_factor
case['bus'][:, 3] = Qd
case['bus'][:, 4] = Gs
case['bus'][:, 5] = Bs
# print_Bus_data(base_case, case)
# print_Gen_data(base_case, case)
# print_Branch_data(base_case, case)
ppopt = pp.ppoption()
ppopt["PF_MAX_IT"] = 10
ppopt['VERBOSE'] = False
x = pp.runpf(case, ppopt=ppopt)
if x[1] == 0:
print(f'Failed to converge, current sample number: {len(edge_features_list)}')
continue
# Graph feature
baseMVA = x[0]['baseMVA']
# Create a vector of branch features including start and end nodes, r, x, b, tau, angle
# -- we need:
# - start node
# - end node
# - r
# - x
# -- the rest can be discarded because they're set to zero.
edge_features = np.zeros((case['branch'].shape[0], 4))
edge_features[:, 0] = case['branch'][:, 0]
edge_features[:, 1] = case['branch'][:, 1]
edge_features[:, 2] = case['branch'][:, 2]
edge_features[:, 3] = case['branch'][:, 3]
# edge_features[:, 4] = case['branch'][:, 4]
# edge_features[:, 5] = case['branch'][:, 8]
# edge_features[:, 6] = case['branch'][:, 9]
# Create a vector of node features including index, type, Vm, Va, Pd, Qd, Gs, Bs, Pg
case['bus'] = x[0]['bus']
node_features_x = np.zeros((case['bus'].shape[0], 9))
node_features_x[:, 0] = case['bus'][:, 0] # index
node_features_x[:, 1] = case['bus'][:, 1] # type
# Va ----This changes for every bus except slack bus
node_features_x[:, 3] = np.zeros(case['bus'].shape[0])
node_features_x[:, 4] = case['bus'][:, 2] # Pd
node_features_x[:, 5] = case['bus'][:, 3] # Qd
node_features_x[:, 6] = case['bus'][:, 4] # Gs
node_features_x[:, 7] = case['bus'][:, 5] # Bs
# Vm is 1 if type is not "generator" else it is case['gen'][:,j]
vm = np.ones(case['bus'].shape[0])
for j in range(case['gen'].shape[0]):
# find index of case['gen'][j,0] in case['bus'][:,0]
index = np.where(case['bus'][:, 0] == case['gen'][j, 0])[0][0]
vm[index] = case['gen'][j, 5] # Vm = Vg
node_features_x[index, 8] = case['gen'][j, 1] # Pg
node_features_x[:, 2] = vm # Vm
# Create a vector of node features including index, type, Vm, Va, Pd, Qd, Gs, Bs
case['bus'] = x[0]['bus']
node_features_y = np.zeros((case['bus'].shape[0], 8))
node_features_y[:, 0] = case['bus'][:, 0] # index
node_features_y[:, 1] = case['bus'][:, 1] # type
# Vm ----This changes for Load Buses
node_features_y[:, 2] = case['bus'][:, 7]
# Va ----This changes for every bus except slack bus
node_features_y[:, 3] = case['bus'][:, 8]
node_features_y[:, 4] = case['bus'][:, 2] # Pd
node_features_y[:, 5] = case['bus'][:, 3] # Qd
node_features_y[:, 6] = case['bus'][:, 4] # Gs
node_features_y[:, 7] = case['bus'][:, 5] # Bs
edge_features_list.append(edge_features)
node_features_x_list.append(node_features_x)
node_features_y_list.append(node_features_y)
graph_feature_list.append(baseMVA)
if len(edge_features_list) == number_of_samples:
break
# Turn the lists into numpy arrays
edge_features = np.array(edge_features_list)
node_features_x = np.array(node_features_x_list)
node_features_y = np.array(node_features_y_list)
graph_features = np.array(graph_feature_list)
# Print the shapes
print(f'Adjacency matrix shape: {A.shape}')
print(f'edge_features shape: {edge_features.shape}')
print(f'node_features_x shape: {node_features_x.shape}')
print(f'node_features_y shape: {node_features_y.shape}')
print(f'graph_features shape: {graph_features.shape}')
# save the features
with open("./data/"+test_case+"_edge_features.npy", 'wb') as f:
np.save(f, edge_features)
with open("./data/"+test_case+"_node_features_x.npy", 'wb') as f:
np.save(f, node_features_x)
with open("./data/"+test_case+"_node_features_y.npy", 'wb') as f:
np.save(f, node_features_y)
with open("./data/"+test_case+"_graph_features.npy", 'wb') as f:
np.save(f, graph_features)
with open("./data/"+test_case+"_adjacency_matrix.npy", 'wb') as f:
np.save(f, A)
import time
import pandapower as pp
import numpy as np
import pickle
# Reference: keys available on a pandapower net object are listed below.
# dict_keys(['bus', 'load', 'sgen', 'motor', 'asymmetric_load', 'asymmetric_sgen', 'storage', 'gen', 'switch', 'shunt', 'svc', 'ext_grid', 'line', 'trafo', 'trafo3w', 'impedance', 'tcsc', 'dcline', 'ward', 'xward', 'measurement', 'pwl_cost', 'poly_cost', 'characteristic', 'controller', 'group', 'line_geodata', 'bus_geodata', '_empty_res_bus', '_empty_res_ext_grid', '_empty_res_line', '_empty_res_trafo', '_empty_res_load', '_empty_res_asymmetric_load', '_empty_res_asymmetric_sgen', '_empty_res_motor', '_empty_res_sgen', '_empty_res_shunt', '_empty_res_svc', '_empty_res_switch', '_empty_res_impedance', '_empty_res_tcsc', '_empty_res_dcline', '_empty_res_ward', '_empty_res_xward', '_empty_res_trafo_3ph', '_empty_res_trafo3w', '_empty_res_bus_3ph', '_empty_res_ext_grid_3ph', '_empty_res_line_3ph', '_empty_res_asymmetric_load_3ph', '_empty_res_asymmetric_sgen_3ph', '_empty_res_storage', '_empty_res_storage_3ph', '_empty_res_gen',
# '_ppc', '_ppc0', '_ppc1', '_ppc2', '_is_elements', '_pd2ppc_lookups', 'version', 'format_version', 'converged', 'OPF_converged', 'name', 'f_hz', 'sn_mva', '_empty_res_load_3ph', '_empty_res_sgen_3ph', 'std_types', 'res_bus', 'res_line', 'res_trafo', 'res_trafo3w', 'res_impedance', 'res_ext_grid', 'res_load', 'res_motor', 'res_sgen', 'res_storage', 'res_shunt', 'res_gen', 'res_ward', 'res_xward', 'res_dcline', 'res_asymmetric_load', 'res_asymmetric_sgen', 'res_switch', 'res_tcsc', 'res_svc', 'res_bus_est', 'res_line_est', 'res_trafo_est', 'res_trafo3w_est', 'res_impedance_est', 'res_switch_est', 'res_bus_sc', 'res_line_sc', 'res_trafo_sc', 'res_trafo3w_sc', 'res_ext_grid_sc', 'res_gen_sc', 'res_sgen_sc', 'res_switch_sc', 'res_bus_3ph', 'res_line_3ph', 'res_trafo_3ph', 'res_ext_grid_3ph', 'res_shunt_3ph', 'res_load_3ph', 'res_sgen_3ph', 'res_storage_3ph', 'res_asymmetric_load_3ph', 'res_asymmetric_sgen_3ph', 'user_pf_options'])
# net = pp.networks.GBnetwork()
# algorithm (str, “nr”) - algorithm that is used to solve the power flow problem.
# The following algorithms are available:
# “nr” Newton-Raphson (pypower implementation with numba accelerations)
# “iwamoto_nr” Newton-Raphson with Iwamoto multiplier (maybe slower than NR but more robust)
# “bfsw” backward/forward sweep (specially suited for radial and weakly-meshed networks)
# “gs” gauss-seidel (pypower implementation)
# “fdbx” fast-decoupled (pypower implementation)
# “fdxb” fast-decoupled (pypower implementation)
# print(net)
# print(net.keys())
number_of_samples = 100000
test_case = 'case118'
# base_net = pp.networks.case6470rte()
base_net = pp.networks.case118()
base_net.bus['name'] = base_net.bus.index
print(base_net.bus)
print(base_net.line)
print(base_net.gen)
# Get Adjacency Matrix
bus_names = base_net.bus['name'].values.tolist()
n = base_net.bus.values.shape[0]
A = np.zeros((n, n))
for edge1,edge2 in base_net.line[['from_bus', 'to_bus']].values:
edge_1 = bus_names.index(edge1)
edge_2 = bus_names.index(edge2)
A[edge_1, edge_2] = 1
A[edge_2, edge_1] = 1
edge_features_list = []
node_features_x_list = []
node_features_y_list = []
graph_feature_list = []
reconstruction_case_list = []
ref_bus = base_net.ext_grid['bus'].values[0]
print(f'ref_bus: {ref_bus}')
r_original = base_net.line['r_ohm_per_km'].values
x_original = base_net.line['x_ohm_per_km'].values
vm_original = base_net.gen['vm_pu'].values
vg_original = base_net.gen['p_mw'].values
pd_original = base_net.load['p_mw'].values
qd_original = base_net.load['q_mvar'].values
net = base_net
counter = 0
while True:
counter += 1
if counter % 100 == 0:
base_net = pp.networks.case118()
else:
net = base_net
# net = pp.networks.case6470rte()
# net = pp.networks.case14()
net.bus['name'] = base_net.bus.index
# net.line['r_ohm_per_km'] = r_original
# net.line['x_ohm_per_km'] = x_original
# net.gen['vm_pu'] = vm_original
# net.gen['p_mw'] = vg_original
# net.load['p_mw'] = pd_original
# net.load['q_mvar'] = qd_original
r = net.line['r_ohm_per_km'].values
x = net.line['x_ohm_per_km'].values
# c = net.line['c_nf_per_km'].values
le = net.line['length_km'].values
# x = case['branch'][:, 3]
# b = case['branch'][:, 4]
# tau = case['branch'][:, 8] # ratio
Pmax = net.gen['max_p_mw'].values
Pmin = net.gen['min_p_mw'].values
Pmw = net.gen['p_mw'].values
Pd = net.load['p_mw'].values
Qd = net.load['q_mvar'].values
r = np.random.uniform(0.8*r, 1.2*r, r.shape[0])
x = np.random.uniform(0.8*x, 1.2*x, x.shape[0])
# le = np.random.uniform(0.8*le, 1.2*le, le.shape[0])
vg = np.random.uniform(0.95, 1.05, net.gen['vm_pu'].shape[0])
# Pg = np.random.uniform(0.25*Pmax, 0.75*Pmax, net.gen['p_mw'].shape[0])
Pg = np.random.uniform(0.25*Pmw, 1.75*Pmw, net.gen['p_mw'].shape[0])
Pd = np.random.uniform(0.5*Pd, 1.5*Pd, net.load['p_mw'].shape[0])
Qd = np.random.uniform(0.5*Qd, 1.5*Qd, net.load['q_mvar'].shape[0])
net.line['r_ohm_per_km'] = r
net.line['x_ohm_per_km'] = x
net.gen['vm_pu'] = vg
net.gen['p_mw'] = Pg
net.load['p_mw'] = Pd
net.load['q_mvar'] = Qd
try:
pp.runpp(net, algorithm='nr', init="results", numba=False)
except Exception:
print(f'Failed to converge, current sample number: {len(edge_features_list)}')
continue
reconstruction_case_list.append((r,x,vg,Pg,Pd,Qd))
# Graph feature
# baseMVA = x[0]['baseMVA']
# Create a vector of branch features including start and end nodes, r, x, b, tau, angle
edge_features = np.zeros((net.line.shape[0], 7))
edge_features[:, 0] = net.line['from_bus'].values + 1
edge_features[:, 1] = net.line['to_bus'].values + 1
edge_features[:, 2] = net.line['r_ohm_per_km'].values * net.line['length_km'].values
edge_features[:, 3] = net.line['x_ohm_per_km'].values * net.line['length_km'].values
edge_features[:, 4] = 0
edge_features[:, 5] = 0
edge_features[:, 6] = 0
# Create a vector of node features including index, type, Vm, Va, Pd, Qd, Gs, Bs, Pg
# case['bus'] = x[0]['bus']
node_features_x = np.zeros((n, 9))
node_features_x[:, 0] = net.bus['name'].values + 1  # index
# Va ----This changes for every bus except slack bus
node_features_x[:, 3] = np.zeros((n, )) #Va
node_features_x[ref_bus, 3] = 0 #Va for reference bus
# node_features_x[:, 6] = np.zeros((n,1)) # Gs
# node_features_x[:, 7] = np.zeros((n,1)) # Bs
# Vm is 1 if type is not "generator" else it is case['gen'][:,j]
vm = np.ones(n)
types = np.ones(n)*2
for j in range(net.gen.shape[0]):
# find index of case['gen'][j,0] in case['bus'][:,0]
index = np.where(net.gen['bus'].values[j] == net.bus['name'])[0][0]
vm[index] = net.gen['vm_pu'].values[j] # Vm = Vg
types[index] = 1 # type = generator
node_features_x[index, 8] = net.gen['p_mw'].values[j] # Pg
types[ref_bus] = 3 #
node_features_x[:, 2] = vm # Vm
node_features_x[:, 1] = types # type
for j in range(net.load.shape[0]):
# find index of case['gen'][j,0] in case['bus'][:,0]
index = np.where(net.load['bus'].values[j] == net.bus['name'])[0][0]
node_features_x[index, 4] = Pd[j] # Pd
node_features_x[index, 5] = Qd[j] # Qd
# Create a vector of node features including index, type, Vm, Va, Pd, Qd, Gs, Bs
node_features_y = np.zeros((n, 8))
node_features_y[:, 0] = net.bus['name'].values + 1 # index
node_features_y[:, 1] = types # type
# Vm ----This changes for Load Buses
node_features_y[:, 2] = net.res_bus['vm_pu'] # Vm
# Va ----This changes for every bus except slack bus
node_features_y[:, 3] = net.res_bus['va_degree'] # Va
node_features_y[:, 4] = net.res_bus['p_mw'] # P
node_features_y[:, 5] = net.res_bus['q_mvar'] # Q
# node_features_y[:, 6] = case['bus'][:, 4] # Gs
# node_features_y[:, 7] = case['bus'][:, 5] # Bs
edge_features_list.append(edge_features)
node_features_x_list.append(node_features_x)
node_features_y_list.append(node_features_y)
# graph_feature_list.append(baseMVA)
if len(edge_features_list) == number_of_samples:
break
elif len(edge_features_list) % 100 == 0:
print(f'Current sample number: {len(edge_features_list)}')
if len(edge_features_list) % 1000 == 0:
# Turn the lists into numpy arrays
edge_features = np.array(edge_features_list)
node_features_x = np.array(node_features_x_list)
node_features_y = np.array(node_features_y_list)
# graph_features = np.array(graph_feature_list)
# reconstruction_case = np.array(reconstruction_case_list)
with open("./data/raw/"+test_case+"_edge_features.npy", 'wb') as f:
np.save(f, edge_features)
with open("./data/raw/"+test_case+"_node_features_x.npy", 'wb') as f:
np.save(f, node_features_x)
with open("./data/raw/"+test_case+"_node_features_y.npy", 'wb') as f:
np.save(f, node_features_y)
#pickle the reconstructed cases
with open("./data/raw/"+test_case+"_reconstruction_case.pkl", 'wb') as f:
pickle.dump(reconstruction_case_list, f)
# with open("./data/"+test_case+"_graph_features.npy", 'wb') as f:
# np.save(f, graph_features)
with open("./data/raw/"+test_case+"_adjacency_matrix.npy", 'wb') as f:
np.save(f, A)
edge_features = np.array(edge_features_list)
node_features_x = np.array(node_features_x_list)
node_features_y = np.array(node_features_y_list)
# graph_features = np.array(graph_feature_list)
# Print the shapes
print(f'Adjacency matrix shape: {A.shape}')
print(f'edge_features shape: {edge_features.shape}')
print(f'node_features_x shape: {node_features_x.shape}')
print(f'node_features_y shape: {node_features_y.shape}')
# print(f'graph_features shape: {graph_features.shape}')
print(f'range of edge_features "from": {np.min(edge_features[:,:,0])} - {np.max(edge_features[:,:,0])}')
print(f'range of edge_features "to": {np.min(edge_features[:,:,1])} - {np.max(edge_features[:,:,1])}')
print(f'range of node_features_x "index": {np.min(node_features_x[:,:,0])} - {np.max(node_features_x[:,:,0])}')
print(f'range of node_features_y "index": {np.min(node_features_y[:,:,0])} - {np.max(node_features_y[:,:,0])}')
import time
import pandas as pd
import pandapower as pp
import numpy as np
import networkx as nx
import multiprocessing as mp
import os
# Reference: keys available on a pandapower net object are listed below.
# dict_keys(['bus', 'load', 'sgen', 'motor', 'asymmetric_load', 'asymmetric_sgen', 'storage', 'gen', 'switch', 'shunt', 'svc', 'ext_grid', 'line', 'trafo', 'trafo3w', 'impedance', 'tcsc', 'dcline', 'ward', 'xward', 'measurement', 'pwl_cost', 'poly_cost', 'characteristic', 'controller', 'group', 'line_geodata', 'bus_geodata', '_empty_res_bus', '_empty_res_ext_grid', '_empty_res_line', '_empty_res_trafo', '_empty_res_load', '_empty_res_asymmetric_load', '_empty_res_asymmetric_sgen', '_empty_res_motor', '_empty_res_sgen', '_empty_res_shunt', '_empty_res_svc', '_empty_res_switch', '_empty_res_impedance', '_empty_res_tcsc', '_empty_res_dcline', '_empty_res_ward', '_empty_res_xward', '_empty_res_trafo_3ph', '_empty_res_trafo3w', '_empty_res_bus_3ph', '_empty_res_ext_grid_3ph', '_empty_res_line_3ph', '_empty_res_asymmetric_load_3ph', '_empty_res_asymmetric_sgen_3ph', '_empty_res_storage', '_empty_res_storage_3ph', '_empty_res_gen',
# '_ppc', '_ppc0', '_ppc1', '_ppc2', '_is_elements', '_pd2ppc_lookups', 'version', 'format_version', 'converged', 'OPF_converged', 'name', 'f_hz', 'sn_mva', '_empty_res_load_3ph', '_empty_res_sgen_3ph', 'std_types', 'res_bus', 'res_line', 'res_trafo', 'res_trafo3w', 'res_impedance', 'res_ext_grid', 'res_load', 'res_motor', 'res_sgen', 'res_storage', 'res_shunt', 'res_gen', 'res_ward', 'res_xward', 'res_dcline', 'res_asymmetric_load', 'res_asymmetric_sgen', 'res_switch', 'res_tcsc', 'res_svc', 'res_bus_est', 'res_line_est', 'res_trafo_est', 'res_trafo3w_est', 'res_impedance_est', 'res_switch_est', 'res_bus_sc', 'res_line_sc', 'res_trafo_sc', 'res_trafo3w_sc', 'res_ext_grid_sc', 'res_gen_sc', 'res_sgen_sc', 'res_switch_sc', 'res_bus_3ph', 'res_line_3ph', 'res_trafo_3ph', 'res_ext_grid_3ph', 'res_shunt_3ph', 'res_load_3ph', 'res_sgen_3ph', 'res_storage_3ph', 'res_asymmetric_load_3ph', 'res_asymmetric_sgen_3ph', 'user_pf_options'])
# net = pp.networks.GBnetwork()
# algorithm (str, “nr”) - algorithm that is used to solve the power flow problem.
# The following algorithms are available:
# “nr” Newton-Raphson (pypower implementation with numba accelerations)
# “iwamoto_nr” Newton-Raphson with Iwamoto multiplier (maybe slower than NR but more robust)
# “bfsw” backward/forward sweep (specially suited for radial and weakly-meshed networks)
# “gs” gauss-seidel (pypower implementation)
# “fdbx” fast-decoupled (pypower implementation)
# “fdxb” fast-decoupled (pypower implementation)
# print(net)
# print(net.keys())
def create_case3():
net = pp.create_empty_network()
net.sn_mva = 100
b0 = pp.create_bus(net, vn_kv=345., name='bus 0')
b1 = pp.create_bus(net, vn_kv=345., name='bus 1')
b2 = pp.create_bus(net, vn_kv=345., name='bus 2')
pp.create_ext_grid(net, bus=b0, vm_pu=1.02, name="Grid Connection")
pp.create_load(net, bus=b2, p_mw=10.3, q_mvar=3, name="Load")
# pp.create_gen(net, bus=b1, p_mw=0.5, vm_pu=1.03, name="Gen", max_p_mw=1)
pp.create_line(net, from_bus=b0, to_bus=b1, length_km=10, name='line 01', std_type='NAYY 4x50 SE')
pp.create_line(net, from_bus=b1, to_bus=b2, length_km=5, name='line 01', std_type='NAYY 4x50 SE')
pp.create_line(net, from_bus=b2, to_bus=b0, length_km=20, name='line 01', std_type='NAYY 4x50 SE')
net.line['c_nf_per_km'] = pd.Series(0., index=net.line['c_nf_per_km'].index, name=net.line['c_nf_per_km'].name)
return net
def remove_c_nf(net):
net.line['c_nf_per_km'] = pd.Series(0., index=net.line['c_nf_per_km'].index, name=net.line['c_nf_per_km'].name)
def unify_vn(net):
for node_id in range(net.bus['vn_kv'].shape[0]):
net.bus['vn_kv'][node_id] = max(net.bus['vn_kv'])
def get_trafo_z_pu(net):
# if random:
# replace_line_r_ohm = max(0., 0.1*np.random.normal(replace_line_r_ohm, np.abs(replace_line_r_ohm)*0.05))
# replace_line_x_ohm = max(0., 0.1*np.random.normal(replace_line_x_ohm, np.abs(replace_line_x_ohm)*0.05))
# for trafo_id in net.trafo.index:
# from_bus, to_bus = net.trafo.iloc[trafo_id]['hv_bus'], net.trafo.iloc[trafo_id]['lv_bus']
# pp.create_line_from_parameters(net, from_bus=from_bus, to_bus=to_bus, length_km=1.,
# r_ohm_per_km=replace_line_r_ohm, x_ohm_per_km=replace_line_r_ohm,
# c_nf_per_km=0., max_i_ka=3.e4,
# max_loading_percent=100.,
# type='ol')
# pp.drop_trafos(net, net.trafo.index, table='trafo')
for trafo_id in net.trafo.index:
net.trafo['i0_percent'][trafo_id] = 0.
net.trafo['pfe_kw'][trafo_id] = 0.
z_pu = net.trafo['vk_percent'].values / 100. * 1000. / net.sn_mva
r_pu = net.trafo['vkr_percent'].values / 100. * 1000. / net.sn_mva
x_pu = np.sqrt(z_pu**2 - r_pu**2)
return r_pu, x_pu  # return in (r, x) order to match get_line_z_pu and the callers below
# raise NotImplementedError
def get_line_z_pu(net):
r = net.line['r_ohm_per_km'].values * net.line['length_km']
x = net.line['x_ohm_per_km'].values * net.line['length_km']
from_bus = net.line['from_bus']
to_bus = net.line['to_bus']
vn_kv_to = net.bus['vn_kv'][to_bus].to_numpy()
vn_kv_to = pd.Series(vn_kv_to)
zn = vn_kv_to**2 / net.sn_mva
r_pu = r/zn
x_pu = x/zn
return r_pu, x_pu
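# Worked example of the per-unit conversion above (illustrative numbers):
# a 10 km line with r = 0.1 ohm/km on a 110 kV bus with sn_mva = 100 gives
#   zn = 110**2 / 100 = 121 ohm, hence r_pu = (0.1 * 10) / 121 ≈ 0.00826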
number_of_samples = 30000
number_of_processes = 1
test_case = 'case14v3'
base_net_create = pp.networks.case14
# base_net_create = create_case3
base_net = base_net_create()
base_net.bus['name'] = base_net.bus.index
print(base_net.bus)
print(base_net.line)
# Get Adjacency Matrix
bus_names = base_net.bus['name'].values.tolist()
n = base_net.bus.values.shape[0]
A = np.zeros((n, n))
# for edge1,edge2 in base_net.line[['from_bus', 'to_bus']].values:
# edge_1 = bus_names.index(edge1)
# edge_2 = bus_names.index(edge2)
# A[edge_1, edge_2] = 1
# A[edge_2, edge_1] = 1
multi_graph = pp.topology.create_nxgraph(base_net)
A = nx.adjacency_matrix(multi_graph).todense()
def generate_data(sublist_size):
edge_features_list = []
node_features_x_list = []
node_features_y_list = []
# graph_feature_list = []
while True:
# net = base_net
net = base_net_create()
# remove_c_nf(net)
# unify_vn(net)
trafo_r_pu, trafo_x_pu = get_trafo_z_pu(net)
net.bus['name'] = base_net.bus.index
r = net.line['r_ohm_per_km'].values
x = net.line['x_ohm_per_km'].values
# c = net.line['c_nf_per_km'].values
le = net.line['length_km'].values
# x = case['branch'][:, 3]
# b = case['branch'][:, 4]
# tau = case['branch'][:, 8] # ratio
Pg = net.gen['p_mw'].values
# Pmin =
Pd = net.load['p_mw'].values
Qd = net.load['q_mvar'].values
r = np.random.uniform(0.8*r, 1.2*r, r.shape[0])
x = np.random.uniform(0.8*x, 1.2*x, x.shape[0])
# c = np.random.uniform(0.8*c, 1.2*c, c.shape[0])
le = np.random.uniform(0.8*le, 1.2*le, le.shape[0])
# tau = np.random.uniform(0.8*tau, 1.2*tau, case['branch'].shape[0])
# angle = np.random.uniform(-0.2, 0.2, case['branch'].shape[0])
Vg = np.random.uniform(1.00, 1.05, net.gen['vm_pu'].shape[0])
Pg = np.random.normal(Pg, 0.1*np.abs(Pg), net.gen['p_mw'].shape[0])
# Pd = np.random.uniform(0.5*Pd, 1.5*Pd, net.load['p_mw'].shape[0])
Pd = np.random.normal(Pd, 0.1*np.abs(Pd), net.load['p_mw'].shape[0])
# Qd = np.random.uniform(0.5*Qd, 1.5*Qd, net.load['q_mvar'].shape[0])
Qd = np.random.normal(Qd, 0.1*np.abs(Qd), net.load['q_mvar'].shape[0])
net.line['r_ohm_per_km'] = r
net.line['x_ohm_per_km'] = x
net.gen['vm_pu'] = Vg
net.gen['p_mw'] = Pg
net.load['p_mw'] = Pd
net.load['q_mvar'] = Qd
try:
net['converged'] = False
pp.runpp(net, algorithm='nr', init="results", numba=False)
except Exception:
if not net['converged']:
# print(f"net['converged'] = {net['converged']}")
print(f'Failed to converge, current sample number: {len(edge_features_list)}')
continue
# Graph feature
# baseMVA = x[0]['baseMVA']
# === DEBUG ===
ybus_matrix = net._ppc["internal"]["Ybus"]
pandapower_bus_idx = 3
ppc_index = net._pd2ppc_lookups["bus"][pandapower_bus_idx]
# === DEBUG ===
# Create a vector of branch features including start and end nodes, r, x, b, tau, angle
edge_features = np.zeros((net.line.shape[0], 7))
edge_features[:, 0] = net.line['from_bus'].values + 1
edge_features[:, 1] = net.line['to_bus'].values + 1
edge_features[:, 2], edge_features[:, 3] = get_line_z_pu(net)
edge_features[:, 4] = 0
edge_features[:, 5] = 0
edge_features[:, 6] = 0
trafo_edge_features = np.zeros((net.trafo.shape[0], 7))
trafo_edge_features[:, 0] = net.trafo['hv_bus'].values + 1
trafo_edge_features[:, 1] = net.trafo['lv_bus'].values + 1
trafo_edge_features[:, 2], trafo_edge_features[:, 3] = get_trafo_z_pu(net)
trafo_edge_features[:, 4] = 0
trafo_edge_features[:, 5] = 0
trafo_edge_features[:, 6] = 0
edge_features = np.concatenate((edge_features, trafo_edge_features), axis=0)
# Create a vector of node features including index, type, Vm, Va, Pd, Qd, Gs, Bs, Pg
# case['bus'] = x[0]['bus']
node_features_x = np.zeros((n, 9))
node_features_x[:, 0] = net.bus['name'].values + 1  # index
# Va ----This changes for every bus except slack bus
node_features_x[:, 3] = np.zeros((n, )) #Va
# node_features_x[:, 6] = np.zeros((n,1)) # Gs
# node_features_x[:, 7] = np.zeros((n,1)) # Bs
# Vm is 1 if type is not "generator" else it is case['gen'][:,j]
vm = np.ones(n)
types = np.ones(n)*2
for j in range(net.gen.shape[0]):
# find index of case['gen'][j,0] in case['bus'][:,0]
index = np.where(net.gen['bus'].values[j] == net.bus['name'])[0][0]
vm[index] = net.gen['vm_pu'].values[j] # Vm = Vg
types[index] = 1 # type = generator
node_features_x[index, 8] = net.gen['p_mw'].values[j] / net.sn_mva # Pg / pu
node_features_x[:, 2] = vm # Vm
node_features_x[:, 1] = types # type
for j in range(net.load.shape[0]):
# find index of case['gen'][j,0] in case['bus'][:,0]
index = np.where(net.load['bus'].values[j] == net.bus['name'])[0][0]
node_features_x[index, 4] = Pd[j] / net.sn_mva # Pd / pu
node_features_x[index, 5] = Qd[j] / net.sn_mva # Qd / pu
# Create a vector of node features including index, type, Vm, Va, Pd, Qd, Gs, Bs
node_features_y = np.zeros((n, 8))
node_features_y[:, 0] = net.bus['name'].values + 1 # index
node_features_y[:, 1] = types # type
# Vm ----This changes for Load Buses
# if net.res_bus['vm_pu'].shape[0] == 0:
# pass
node_features_y[:, 2] = net.res_bus['vm_pu'] # Vm
# Va ----This changes for every bus except slack bus
node_features_y[:, 3] = net.res_bus['va_degree'] # Va
node_features_y[:, 4] = net.res_bus['p_mw'] / net.sn_mva # P / pu
node_features_y[:, 5] = net.res_bus['q_mvar'] / net.sn_mva # Q / pu
# node_features_y[:, 6] = case['bus'][:, 4] # Gs
# node_features_y[:, 7] = case['bus'][:, 5] # Bs
edge_features_list.append(edge_features)
node_features_x_list.append(node_features_x)
node_features_y_list.append(node_features_y)
# graph_feature_list.append(baseMVA)
if len(edge_features_list) == sublist_size:
break
elif len(edge_features_list) % 10 == 0:
print(f'[Process {os.getpid()}] Current sample number: {len(edge_features_list)}')
return edge_features_list, node_features_x_list, node_features_y_list
def generate_data_parallel(num_samples, num_processes):
sublist_size = num_samples // num_processes
pool = mp.Pool(processes=num_processes)
results = pool.map(generate_data, [sublist_size]*num_processes)
pool.close()
pool.join()
edge_features_list = []
node_features_x_list = []
node_features_y_list = []
for sub_res in results:
edge_features_list += sub_res[0]
node_features_x_list += sub_res[1]
node_features_y_list += sub_res[2]
return edge_features_list, node_features_x_list, node_features_y_list
if __name__ == '__main__':
# Generate data
edge_features_list, node_features_x_list, node_features_y_list = generate_data_parallel(number_of_samples, number_of_processes)
# Turn the lists into numpy arrays
edge_features = np.array(edge_features_list)
node_features_x = np.array(node_features_x_list)
node_features_y = np.array(node_features_y_list)
# graph_features = np.array(graph_feature_list)
# Print the shapes
print(f'Adjacency matrix shape: {A.shape}')
print(f'edge_features shape: {edge_features.shape}')
print(f'node_features_x shape: {node_features_x.shape}')
print(f'node_features_y shape: {node_features_y.shape}')
# print(f'graph_features shape: {graph_features.shape}')
print(f'range of edge_features "from": {np.min(edge_features[:,:,0])} - {np.max(edge_features[:,:,0])}')
print(f'range of edge_features "to": {np.min(edge_features[:,:,1])} - {np.max(edge_features[:,:,1])}')
print(f'range of node_features_x "index": {np.min(node_features_x[:,:,0])} - {np.max(node_features_x[:,:,0])}')
print(f'range of node_features_y "index": {np.min(node_features_y[:,:,0])} - {np.max(node_features_y[:,:,0])}')
# print(f"A. {A}")
# print(f"edge_features. {edge_features}")
# print(f"node_features_x. {node_features_x}")
# print(f"node_features_y. {node_features_y}")
# save the features
os.makedirs("./data/raw", exist_ok=True)
with open("./data/raw/"+test_case+"_edge_features.npy", 'wb') as f:
np.save(f, edge_features)
with open("./data/raw/"+test_case+"_node_features_x.npy", 'wb') as f:
np.save(f, node_features_x)
with open("./data/raw/"+test_case+"_node_features_y.npy", 'wb') as f:
np.save(f, node_features_y)
# with open("./data/"+test_case+"_graph_features.npy", 'wb') as f:
# np.save(f, graph_features)
with open("./data/raw/"+test_case+"_adjacency_matrix.npy", 'wb') as f:
np.save(f, A)
exit()
# Computation time experimental comparison beginning (will be moved to other file later on)
# calculate power flow for every algorithm and calculate time
algorithms = ["nr", "iwamoto_nr", "gs", "fdbx", "fdxb"]
times = []
for a in algorithms:
t0 = time.time()
# pp.runpp(net, algorithm=a)
pp.runpp(net, algorithm=a, init="results", numba=False)
t1 = time.time()
times.append(t1 - t0)
for a in algorithms:
print(f"{a}: {times[algorithms.index(a)]}")
# print(net.res_bus.vm_pu)
# print(net.res_line.loading_percent)
# calculate power flow for every algorithm and calculate time 1000 times
# algorithms = ["nr", "iwamoto_nr", "gs", "fdbx", "fdxb"]
algorithms = ["nr", "iwamoto_nr", "fdbx", "fdxb"]
times = []
for a in algorithms:
print(a)
t0 = time.time()
for i in range(1000):
pp.runpp(net, algorithm=a, init="auto", numba=False)
t1 = time.time()
times.append(t1 - t0)
for a, t in zip(algorithms, times):
    print(f"{a}: {t/1000}")
"""
this file defines the class of PowerFlowData, which is used to load the data of Power Flow
"""
import os
from typing import Callable, List, Optional, Tuple
import numpy as np
import torch
import torch_geometric
from torch_geometric.data import Data, InMemoryDataset
from torch_geometric.utils import dense_to_sparse
ori_feature_names_x = [
'index', # - removed
'type', # --- one-hot encoded, 0, 1, 2, 3
'voltage magnitude', # --- this matters, 4,
'voltage angle', # --- this matters, 5,
'Pd', # --- this matters, preprocessed as Pd-Pg 6,
'Qd', # --- this matters 7,
'Gs', # - equivalent to Pd, Qd 8,
'Bs', # - equivalent to Pd, Qd 9,
'Pg' # - removed
]
feature_names_x = [
'type_0', # --- one-hot encoded, 0,
'type_1', # --- one-hot encoded, 1,
'type_2', # --- one-hot encoded, 2,
'type_3', # --- one-hot encoded, 3,
'voltage magnitude', # --- this matters, 4,
'voltage angle', # --- this matters, 5,
'Pd', # --- this matters, preprocessed as Pd-Pg 6,
'Qd', # --- this matters 7,
'Gs', # - equivalent to Pd, Qd 8,
'Bs', # - equivalent to Pd, Qd 9,
'to_predict_voltage_magnitude', # 10,
'to_predict_voltage_angle', # 11,
'to_predict_Pd', # 12,
'to_predict_Qd', # 13,
'to_predict_Gs', # 14,
'to_predict_Bs' # 15,
]
ori_feature_names_y = [
'index', # - removed
'type', # - removed
'voltage magnitude', # --- we care about this
'voltage angle', # --- we care about this
'active power', # --- we care about this
'reactive power', # --- we care about this
'Gs', # -
'Bs' # -
]
feature_names_y = [
'voltage magnitude', # --- we care about this
'voltage angle', # --- we care about this
'active power', # --- we care about this
'reactive power', # --- we care about this
'Gs', # -
'Bs'
]
edge_feature_names = [
'r', # --- this matters, resistance, pu
'x', # --- this matters, reactance, pu
'b',
'tau',
'angle'
]
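# Orientation note (not in the original file): for a processed sample `d`,
# d.edge_attr columns follow edge_feature_names (r, x, b, tau, angle); the raw
# edge-feature arrays carry two extra leading columns (from-bus, to-bus) that
# process() below turns into d.edge_index.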
class PowerFlowData(InMemoryDataset):
"""PowerFlowData(InMemoryDataset)
Parameters:
root (str, optional) – Root directory where the dataset should be saved. (optional: None)
pre_filter (callable)- A function
Comments:
we actually do not need adjacency matrix, since we can use edge_index to represent the graph from `edge_features`
Returns:
class instance of PowerFlowData
"""
partial_file_names = [
"adjacency_matrix.npy",
"edge_features.npy",
"node_features_x.npy",
"node_features_y.npy"
]
split_order = {
"train": 0,
"val": 1,
"test": 2
}
mixed_cases = [
'118v2',
'14v2',
]
def __init__(self,
root: str,
case: str = '14',
split: Optional[List[float]] = None,
task: str = "train",
transform: Optional[Callable] = None,
pre_transform: Optional[Callable] = None,
pre_filter: Optional[Callable] = None,
normalize=True):
assert split is None or len(split) == 3
assert task in ["train", "val", "test"]
self.normalize = normalize
self.case = case # THIS MUST BE EXECUTED BEFORE super().__init__() since it is used in raw_file_names and processed_file_names
self.split = split
self.task = task
super().__init__(root, transform, pre_transform, pre_filter)
self.data, self.slices, self.mask = self._normalize_dataset(
    *torch.load(self.processed_paths[0])) # necessary, do not forget!
def get_data_dimensions(self):
return self[0].x.shape[1], self[0].y.shape[1], self[0].edge_attr.shape[1]
def get_data_means_stds(self):
assert self.normalize
return self.xymean[:1, :], self.xystd[:1, :], self.edgemean[:1, :], self.edgestd[:1, :]
def _normalize_dataset(self, data, slices) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
if not self.normalize:
# TODO an actual mask, perhaps never necessary though
return data, slices, torch.tensor([])
# selecting the right features
# for x
data.x[:, 4] = data.x[:, 4] - data.x[:, 8] # Pd = Pd - Pg
# +3 columns from one-hot encoding the node type over four classes (1 column -> 4), -2 because index and Pg are removed
template = torch.zeros((data.x.shape[0], data.x.shape[1] + 3 - 2))
template[:, 0:4] = torch.nn.functional.one_hot(
data.x[:, 1].type(torch.int64), num_classes=4)
template[:, 4:10] = data.x[:, 2:8]
data.x = template
# for y
data.y = data.y[:, 2:]
# SHAPE NOW: torch.Size([14, 10]) torch.Size([14, 6]) for x and y
# normalizing
# for node attributes
xy = torch.concat([data.x[:, 4:], data.y], dim=0)
# the 6 normalized features are Vm, Va, Pd, Qd, Gs, Bs
mean = torch.mean(xy, dim=0).unsqueeze(dim=0).expand(data.x.shape[0], 6)
std = torch.std(xy, dim=0).unsqueeze(dim=0).expand(data.x.shape[0], 6)
self.xymean, self.xystd = mean, std
# + 0.0000001 to avoid NaN's because of division by zero
data.x[:, 4:] = (data.x[:, 4:] - mean) / (std + 0.0000001)
data.y = (data.y - mean) / (std + 0.0000001)
# for edge attributes
mean = torch.mean(data.edge_attr, dim=0).unsqueeze(dim=0).expand(
data.edge_attr.shape[0], data.edge_attr.shape[1])
std = torch.std(data.edge_attr, dim=0).unsqueeze(dim=0).expand(
data.edge_attr.shape[0], data.edge_attr.shape[1])
self.edgemean, self.edgestd = mean, std
data.edge_attr = (data.edge_attr - mean) / (std + 0.0000001)
# adding the mask
# where x and y are unequal, the network must predict
# 1 where value changed, 0 where it did not change
unequal = (data.x[:, 4:] != data.y).float()
data.prediction_mask = unequal
data.x = torch.concat([data.x, unequal], dim=1)
return data, slices, unequal
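    # Hypothetical convenience helper (a sketch, not part of the original
    # class): invert the node-feature normalization with the stored statistics.
    # Assumes the dataset was built with normalize=True.
    def denormalize_nodes(self, values: torch.Tensor) -> torch.Tensor:
        mean, std = self.xymean[:1, :], self.xystd[:1, :]
        return values * (std + 0.0000001) + mean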
@property
def raw_file_names(self) -> List[str]:
if self.case != 'mixed':
    return [f"case{self.case}_{name}" for name in self.partial_file_names]
else:
    return [f"case{case}_{name}" for case in self.mixed_cases for name in self.partial_file_names]
@property
def processed_file_names(self) -> List[str]:
return ["case"+f"{self.case}"+"_processed_data.pt"]
def len(self):
return self.slices['x'].shape[0]-1
# def get(self, idx: int) -> Data: # override
# return self.data[idx]
def process(self):
# load the raw arrays per case; edge_index is built below from edge_features
assert len(self.raw_paths) % 4 == 0
raw_paths_per_case = [self.raw_paths[i:i+4] for i in range(0, len(self.raw_paths), 4)]
data_list = []
for raw_paths in raw_paths_per_case:
adj_mat = dense_to_sparse(torch.from_numpy(np.load(raw_paths[0])))  # loaded for reference only; edge_index comes from edge_features
edge_features = torch.from_numpy(np.load(raw_paths[1])).float()
node_features_x = torch.from_numpy(np.load(raw_paths[2])).float()
node_features_y = torch.from_numpy(np.load(raw_paths[3])).float()
if self.split is not None:
split_len = [int(len(node_features_x) * i) for i in self.split]
split_len[-1] = len(node_features_x) - sum(split_len[:-1])  # absorb rounding remainder so the sizes sum exactly
edge_features = torch.split(edge_features, split_len, dim=0)[
self.split_order[self.task]]
node_features_x = torch.split(node_features_x, split_len, dim=0)[
self.split_order[self.task]]
node_features_y = torch.split(node_features_y, split_len, dim=0)[
self.split_order[self.task]]
per_case_data_list = [
Data(
x=node_features_x[i],
y=node_features_y[i],
edge_index=edge_features[i, :, 0:2].T.to(torch.long)-1,  # shift 1-based bus numbers to 0-based indices
edge_attr=edge_features[i, :, 2:],
) for i in range(len(node_features_x))
]
data_list.extend(per_case_data_list)
if self.pre_filter is not None: # filter out some data
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
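    # Note (assumption): InMemoryDataset caches the collated result, so process()
    # only runs when the processed file is missing; delete
    # data/processed/case<case>_processed_data.pt to force regeneration.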
def main():
try:
# shape = (N, n_edges, 7) (from, to, ...)
edge_features = np.load("data/raw/case14_edge_features.npy")
# shape = (N, n_nodes, n_nodes)
adj_matrix = np.load("data/raw/case14_adjacency_matrix.npy")
# shape = (N, n_nodes, 9)
node_features_x = np.load("data/raw/case14_node_features_x.npy")
# shape = (N, n_nodes, 8)
node_features_y = np.load("data/raw/case14_node_features_y.npy")
except FileNotFoundError:
    print("File not found. Generate the raw .npy files first.")
    return
print(f"edge_features.shape = {edge_features.shape}")
print(f"adj_matrix.shape = {adj_matrix.shape}")
print(f"node_features_x.shape = {node_features_x.shape}")
print(f"node_features_y.shape = {node_features_y.shape}")
trainset = PowerFlowData(root="data", case='14',
                         split=[.5, .2, .3], task="train")
train_loader = torch_geometric.loader.DataLoader(
trainset, batch_size=12, shuffle=True)
print(len(trainset))
print(trainset[0])
print(next(iter(train_loader)))
if __name__ == "__main__":
main()
__all__ = ['PowerFlowData']
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"c:\\Users\\nataxcan\\miniconda3\\envs\\graphml_proj\\lib\\site-packages\\torch_geometric\\data\\in_memory_dataset.py:157: UserWarning: It is not recommended to directly access the internal storage format `data` of an 'InMemoryDataset'. If you are absolutely certain what you are doing, access the internal storage via `InMemoryDataset._data` instead to suppress this warning. Alternatively, you can access stacked individual attributes of every graph via `dataset.{attr_name}`.\n",
" warnings.warn(msg)\n"
]
}
],
"source": [
"import torch\n",
"from torch_geometric.loader import DataLoader\n",
"from datasets.PowerFlowData import PowerFlowData\n",
"\n",
"testset = PowerFlowData(root='data', case='14', split=[.5, .2, .3], task='test')\n",
"test_loader = DataLoader(testset, batch_size=128, shuffle=False)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Data(x=[14, 9], edge_index=[2, 20], edge_attr=[20, 5], y=[14, 8])\n",
"Data(x=[14, 16], edge_index=[2, 20], edge_attr=[20, 5], y=[14, 6])\n"
]
}
],
"source": [
"# here you can experiment with the normalization function\n",
"\n",
"def transform(data):\n",
" ## selecting the right features\n",
" # for x\n",
" data.x[:,4] = data.x[:,4] - data.x[:,8] # Pd = Pd - Pg\n",
" # + 3 for the one-hot encoding for four node types, -2 because we remove the index and Pg\n",
" template = torch.zeros((data.x.shape[0], data.x.shape[1] + 3 - 2))\n",
" template[:,0:4] = torch.nn.functional.one_hot(data.x[:,1].type(torch.int64))\n",
" template[:,4:10] = data.x[:,2:8]\n",
" data.x = template\n",
" # for y\n",
" # - 2 for removing index and type\n",
" data.y = data.y[:,2:]\n",
"\n",
" # SHAPE NOW: torch.Size([14, 10]) torch.Size([14, 6]) for x and y\n",
"\n",
" ## normalizing\n",
" # for node attributes\n",
" xy = torch.concat([data.x[:,4:], data.y], dim=0)\n",
" mean = torch.mean(xy, dim=0).unsqueeze(dim=0).expand(data.x.shape[0], 6)# 6 for:\n",
" std = torch.std(xy, dim=0).unsqueeze(dim=0).expand(data.x.shape[0], 6)# Vm, Va, Pd, Qd, Gs, Bs\n",
" data.x[:,4:] = (data.x[:,4:] - mean) / (std + 0.1) # + 0.1 to avoid NaN's because of division by zero\n",
" data.y = (data.y - mean) / (std + 0.1)\n",
" # for edge attributes\n",
" mean = torch.mean(data.edge_attr, dim=0).unsqueeze(dim=0).expand(data.edge_attr.shape[0], data.edge_attr.shape[1])\n",
" std = torch.std(data.edge_attr, dim=0).unsqueeze(dim=0).expand(data.edge_attr.shape[0], data.edge_attr.shape[1])\n",
" data.edge_attr = (data.edge_attr - mean) / (std + 0.1)\n",
"\n",
" ## adding the mask\n",
" # where x and y are unequal, the network must predict\n",
" unequal = (data.x[:,4:] != data.y).float()\n",
" data.x = torch.concat([data.x, unequal], dim=1)\n",
"\n",
" return data\n",
"\n",
"notnormed = PowerFlowData(root='data', case='14', split=[.5, .2, .3], task='test', normalize=False)\n",
"data1 = notnormed[90]\n",
"print(data1)\n",
"print(transform(data1))\n",
"\n",
"# # print(xold.x[:,2:8]) # the old power values\n",
"# # print(xold.y[:,2].float())\n",
"# # print(xnew.x[:,4:10]) # the normalized ones\n",
"# # print(xnew.x[:,10:]) # the mask to know which "
]
},
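{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# hedged sanity check (a sketch, not in the original notebook): transform()\n",
"# appends the prediction mask as columns 10: of x; summing over nodes counts\n",
"# how many entries must be predicted per feature (Vm, Va, Pd, Qd, Gs, Bs)\n",
"print(data1.x[:, 10:].sum(dim=0))"
]
},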
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Data(x=[14, 9], edge_index=[2, 20], edge_attr=[20, 5], y=[14, 8])\n",
"Data(x=[14, 16], edge_index=[2, 20], edge_attr=[20, 5], y=[14, 6])\n"
]
}
],
"source": [
"# here you can see it in action\n",
"\n",
"notnormed = PowerFlowData(root='data', case='14', split=[.5, .2, .3], task='test', normalize=False)\n",
"normed = PowerFlowData(root='data', case='14', split=[.5, .2, .3], task='test', normalize=True)\n",
"data1 = notnormed[90]\n",
"data2 = normed[90]\n",
"\n",
"print(data1)\n",
"print(data2)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "graphml_proj",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
FROM image.sourcefind.cn:5000/dcu/admin/base/pytorch:2.4.1-ubuntu22.04-dtk25.04.1-py3.10