1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
import torch.nn as nn
import torch.nn.functional as F
import torch
import math
import numpy as np

class GNN_Core(nn.Module):
    """One graph-convolution layer: a learned self-loop term built from the
    diagonal of the input, plus a learned neighbour-aggregation term A @ X
    passed through a small MLP.

    Args:
        A:    adjacency matrix of the graph, expected square [N, N]
              (the original comment said [N, D], but ``A.shape[0]`` is used
              as the node count and A left-multiplies X, so it must be NxN).
        inC:  input feature dimension D.
        outC: output feature dimension O.
    """

    def __init__(self, A, inC, outC):
        super(GNN_Core, self).__init__()
        # Features matrix X: [N, D]; adjacency matrix A: [N, N].
        N = A.shape[0]
        # MLP over the diagonal of X: learns the self-connection weights.
        # NOTE(review): attribute name keeps the original misspelling
        # ("self_adjecent") so existing state_dict checkpoints still load.
        self.self_adjecent = nn.Sequential(
            nn.Linear(N, 64),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Linear(64, N),
        )

        # MLP applied after neighbour aggregation: maps D -> O features.
        self.adjacent = nn.Sequential(
            nn.Linear(inC, 32),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            nn.Linear(32, outC),
        )
        # Registered as a buffer so .to(device)/.cuda() moves it with the
        # module; a plain `self.A = A` would stay on the original device.
        self.register_buffer("A", A)

    def forward(self, x, w):
        # Tip kept from the original author: torch.diag(M) extracts the
        # diagonal and torch.diag_embed(v) rebuilds the matrix, so
        # M - torch.diag_embed(torch.diag(M)) zeroes the diagonal.
        #
        # NOTE(review): torch.diag(x) requires x to be 1-D/2-D while
        # torch.bmm(self.A, x) requires 3-D batched input — the same x
        # cannot satisfy both. TODO: confirm the intended input shape.
        sA = self.self_adjecent(torch.diag(x))   # train only the self-loop (diagonal) weights
        A = self.adjacent(torch.bmm(self.A, x))  # simulate the A @ X aggregation
        A = w * A  # w is the normalisation weighting applied to the aggregate
        # A: [B, N, O], sA: [N].
        # Broadcast sA per node. The original `(A.T + sA).T` is broken for
        # 3-D A (it aligns sA with the batch dim, and Tensor.T on >2-D
        # tensors is deprecated); unsqueeze(-1) adds sA along the node dim.
        return A + sA.unsqueeze(-1)


class GNN(nn.Module):
    """Stack of ``hidden_num`` GNN_Core layers with BatchNorm + ReLU
    between consecutive layers, followed by a softmax.

    Args:
        A:          adjacency matrix shared by every layer.
        hidden_num: number of GNN_Core layers.
        hidden_out: sequence of channel sizes; must have length
                    ``hidden_num + 1`` (layer i maps hidden_out[i] ->
                    hidden_out[i + 1]).

    NOTE(review): GNN_Core.forward takes two arguments (x, w), but
    nn.Sequential forwards a single value — this pipeline cannot run
    until that interface mismatch is resolved. TODO: confirm intent.
    """

    def __init__(self, A, hidden_num, hidden_out):
        super(GNN, self).__init__()
        self.hiddens = nn.Sequential()
        for i in range(hidden_num):
            self.hiddens.append(GNN_Core(A, hidden_out[i], hidden_out[i + 1]))
            if i < hidden_num - 1:  # no norm/activation after the last layer
                self.hiddens.append(nn.BatchNorm1d(hidden_out[i + 1]))
                self.hiddens.append(nn.ReLU())

    def forward(self, x):
        x = self.hiddens(x)
        # dim=-1: normalise over the feature/class dimension. Calling
        # F.softmax without `dim` is deprecated and picks a dim implicitly.
        return F.softmax(x, dim=-1)



class Loss(nn.Module):
    """Binary cross-entropy (negative log-likelihood), summed over elements.

    Equivalent to ``F.binary_cross_entropy(x, y, reduction="sum")``; kept
    hand-rolled to preserve the original module's interface.
    """

    def __init__(self):
        super(Loss, self).__init__()

    def forward(self, x, y):
        """Return the scalar -sum(y*log(x) + (1-y)*log(1-x)).

        Args:
            x: predicted probabilities in (0, 1).
            y: binary targets (0 or 1), same shape as x.
        """
        # Fixes vs. original: torch.log instead of np.log (np.log breaks
        # autograd / fails on tensors requiring grad), and the leading
        # minus sign — without it, gradient descent would *minimise* the
        # log-likelihood instead of the loss.
        return -torch.sum(y * torch.log(x) + (1 - y) * torch.log(1 - x))

def train():
    """Run one illustrative optimisation step (stub — data/labels are fake).

    Fixes vs. original:
      * ``hidden_out`` must be a sequence of length hidden_num + 1, not the
        int 128 — GNN indexes it per layer.
      * ``torch.optim.SGD`` requires the model parameters and a learning
        rate; it cannot be constructed with no arguments.
      * A and the data are concrete placeholder tensors instead of None.

    NOTE(review): GNN_Core.forward still expects a second argument ``w``,
    so this step will not run end-to-end until that interface is resolved.
    """
    N = 16                # number of graph nodes
    A = torch.eye(N)      # placeholder adjacency; replace with the real graph
    model = GNN(A, 3, [128, 64, 32, 16])
    loss_func = Loss()

    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    # TODO(review): plug in a real dataset; these tensors only sketch shapes.
    data = torch.randn(4, N, 128)
    y = torch.randint(0, 2, (4, N, 16)).float()

    # quick simulation of a single training step
    y_pre = model(data)
    optimizer.zero_grad()
    loss = loss_func(y_pre, y)
    loss.backward()
    optimizer.step()