Graph Neural Network Node Feature Smoothing: An Implementation
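The walkthrough below builds multi-hop smoothed node features on a toy 10-node graph. For each exponent r in a fixed list, the adjacency matrix is normalized as A_hat = D^(r-1) (A + I) D^(-r), where D is the degree matrix of A + I; the node features are then propagated n times through A_hat, and the propagation layers are fused with per-node cosine-similarity weights. The smoothed features obtained for different r values are combined by an element-wise maximum, and a sigmoid of their inner products yields pairwise node similarities.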
from __future__ import division
from __future__ import print_function
1. Import the required modules and libraries
import argparse
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn.functional as F
2. Define and parse the arguments
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--dataset', type=str, default='cora', help='Type of dataset.')
hops = 20                               # number of propagation steps n (fixed, not a CLI flag)
r_list = [0, 0.1, 0.2, 0.3, 0.4, 0.5]   # normalization exponents r
args = parser.parse_args()
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    return torch.sparse.FloatTensor(indices, values, shape)
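As a quick sanity check, here is a minimal usage sketch for this helper (the 3x3 identity input is illustrative, not from the original):

example = sp.eye(3, format='csr')             # small scipy sparse matrix
t = sparse_mx_to_torch_sparse_tensor(example)
print(t.shape)       # torch.Size([3, 3])
print(t.to_dense())  # the 3x3 identity as a dense float tensor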
def normalize_adj(mx, r):
    """Normalize a sparse adjacency matrix as D^(r-1) (A + I) D^(-r),
    where D is the degree matrix of A + I."""
    mx = sp.coo_matrix(mx) + sp.eye(mx.shape[0])  # add self-loops
    rowsum = np.array(mx.sum(1))
    r_inv_sqrt_left = np.power(rowsum, r - 1).flatten()
    r_inv_sqrt_left[np.isinf(r_inv_sqrt_left)] = 0.
    r_mat_inv_sqrt_left = sp.diags(r_inv_sqrt_left)
    r_inv_sqrt_right = np.power(rowsum, -r).flatten()
    r_inv_sqrt_right[np.isinf(r_inv_sqrt_right)] = 0.
    r_mat_inv_sqrt_right = sp.diags(r_inv_sqrt_right)
    adj_normalized = mx.dot(r_mat_inv_sqrt_left).transpose().dot(r_mat_inv_sqrt_right).tocoo()
    return sparse_mx_to_torch_sparse_tensor(adj_normalized)
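For intuition: at r = 0.5 the left and right exponents both become -1/2, so normalize_adj reduces to the standard symmetric GCN normalization D^(-1/2) (A + I) D^(-1/2). A minimal illustrative check (the 2-node test matrix is not from the original):

A = sp.csr_matrix(np.array([[0., 1.],
                            [1., 0.]]))
sym = normalize_adj(A, 0.5).to_dense()
# A + I = [[1, 1], [1, 1]] with degrees [2, 2], so every entry is
# scaled by 1/sqrt(2) * 1/sqrt(2) = 1/2.
print(sym)  # tensor([[0.5000, 0.5000], [0.5000, 0.5000]])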
def run(args):
    # 3. Load the data: a toy 10-node adjacency matrix and feature matrix
    adj = torch.tensor([
        [1., 1., 0., 0., 0., 0., 0., 1., 0., 0.],
        [1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
        [0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
        [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
        [0., 0., 0., 1., 1., 0., 0., 0., 0., 0.],
        [0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
        [0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
        [0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
        [0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
        [0., 0., 0., 0., 0., 1., 0., 0., 0., 1.],
    ])
    features = torch.tensor([
        [1., 1.2, 0., 0., 0., 0., 0., 1., 0.2, 0.],
        [1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
        [0., 0., 1., 0., 0.5, 0., 0., 0., 0., 0.],
        [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
        [0., 0., 0., 1., 1., 0., 0., 0., 0., 0.],
        [0., 0., 2., 0., 0., 1., 0., 0., 0., 0.],
        [0., 0., 1., 0., 0., 0., 1., 0., 0., 0.],
        [0., 0., 0., 0.4, 0., 0., 0., 1., 0., 0.],
        [0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
        [0., 0., 0., 0., 0., 1., 0., 0., 0., 1.],
    ])
    n_nodes, feat_dim = features.shape
    # 4. For each number of propagation steps n
    for hop in range(hops, hops + 1):  # hop is the number of propagation steps n
        # 5. Initialize the input features to 0
        input_features = 0.
        # 6. For each value of r
        for r in r_list:
            # 7. Normalize the adjacency matrix: A_hat = D^(r-1) (A + I) D^(-r)
            adj_norm = normalize_adj(adj, r)
            # 8. Initialize the feature list with the original features
            features_list = []
            features_list.append(features)
            # 9. Repeatedly multiply the normalized adjacency matrix with the
            #    latest feature matrix, appending each result so that
            #    features_list holds hop + 1 propagation layers
            for _ in range(hop):
                features_list.append(torch.spmm(adj_norm, features_list[-1]))  # X <- A_hat @ X
            # 10. Compute a weight for each propagation layer from the cosine
            #     similarity between the original and the propagated features
            weight_list = []
            norm_fea = torch.norm(features, 2, 1).add(1e-10)  # row-wise L2 norms of the original features
            for fea in features_list:
                norm_cur = torch.norm(fea, 2, 1).add(1e-10)
                temp = torch.div((features * fea).sum(1), norm_fea)  # per-node inner product / ||X||
                temp = torch.div(temp, norm_cur)                     # ... / ||X_k||: cosine similarity
                weight_list.append(temp.unsqueeze(-1))
            weight = F.softmax(torch.cat(weight_list, dim=1), dim=1)  # normalize weights across layers
            input_feas = []
            # 11. Smooth each node: weighted sum of its feature vector across all layers
            for i in range(n_nodes):
                fea = 0.
                for j in range(hop + 1):
                    fea += (weight[i][j] * features_list[j][i]).unsqueeze(0)
                input_feas.append(fea)
            input_feas = torch.cat(input_feas, dim=0)
            # 12. For the first r value, take the smoothed features as the input features
            if r == r_list[0]:
                input_features = input_feas
            else:
                # 13. For every later r value, stack the new smoothed features with the
                #     current input features and keep the element-wise maximum
                temp = []
                temp.append(input_features.unsqueeze(0))
                temp.append(input_feas.unsqueeze(0))
                input_features = torch.cat(temp, dim=0).max(0)[0]
        # 14. Compute pairwise node similarities from the fused features
        sim = torch.sigmoid(torch.mm(input_features, input_features.T))
    return sim
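As an aside, the double loop in step 11 computes a per-node weighted sum over the propagation layers, which can also be written as one batched operation. A minimal self-contained sketch with hypothetical demo shapes (the _demo names are illustrative, not from the original):

# Hypothetical shapes for illustration: 10 nodes, 3 layers, 10 feature dims.
n_nodes, n_layers, feat_dim = 10, 3, 10
features_list_demo = [torch.randn(n_nodes, feat_dim) for _ in range(n_layers)]
weight_demo = F.softmax(torch.randn(n_nodes, n_layers), dim=1)
# Stack the layers into (n_nodes, n_layers, feat_dim), then contract the
# layer axis against the (n_nodes, n_layers) weight matrix.
stacked = torch.stack(features_list_demo, dim=1)
smoothed = (weight_demo.unsqueeze(-1) * stacked).sum(dim=1)  # (n_nodes, feat_dim)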
15. Run the main function
if __name__ == '__main__':
    run(args)
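Since run() returns the sigmoid similarity matrix, the entry point could also inspect it. A sketch, where the 0.9 threshold is an arbitrary illustrative choice:

if __name__ == '__main__':
    sim = run(args)
    # Illustrative: print the node pairs whose similarity score exceeds 0.9.
    for i, j in (sim > 0.9).nonzero().tolist():
        if i < j:
            print(f'nodes {i} and {j}: sim = {sim[i, j].item():.3f}')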