import torch
import torch.nn as nn
import torch.nn.functional as F
class GNN_Core(nn.Module):
    """One graph layer: neighbour aggregation through A plus a learned self-connection term."""

    def __init__(self, A, inC, outC):
        super(GNN_Core, self).__init__()
        # Self-connection branch: transforms each node's own features.
        self.self_adjacent = nn.Sequential(
            nn.Linear(inC, 64),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Linear(64, outC),
        )
        # Neighbour branch: transforms features aggregated over the graph.
        self.adjacent = nn.Sequential(
            nn.Linear(inC, 32),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            nn.Linear(32, outC),
        )
        # Register A as a buffer so it moves with the module across devices.
        self.register_buffer("A", torch.as_tensor(A, dtype=torch.float32))

    def forward(self, x, w=1.0):
        # x: (N, inC) node features; w optionally scales the neighbour term.
        neighbour = w * self.adjacent(self.A @ x)  # A @ x aggregates each node's neighbourhood
        self_term = self.self_adjacent(x)          # per-node self-loop contribution
        return neighbour + self_term
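# Hypothetical smoke test (random placeholder graph, not from the original):
# one GNN_Core layer should map (8, 16) node features to (8, 4).
def _check_core_shapes():
    A = torch.rand(8, 8)
    layer = GNN_Core(A, inC=16, outC=4)
    out = layer(torch.rand(8, 16))
    print(out.shape)  # torch.Size([8, 4])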
class GNN(nn.Module):
    """Stack of hidden_num GNN_Core layers; hidden_out lists the feature width at every stage."""

    def __init__(self, A, hidden_num, hidden_out):
        super(GNN, self).__init__()
        layers = []
        for i in range(hidden_num):
            layers.append(GNN_Core(A, hidden_out[i], hidden_out[i + 1]))
            if i < hidden_num - 1:  # no extra norm/activation after the last layer
                layers.append(nn.BatchNorm1d(hidden_out[i + 1]))
                layers.append(nn.ReLU())
        self.hiddens = nn.Sequential(*layers)

    def forward(self, x):
        x = self.hiddens(x)
        return F.softmax(x, dim=-1)  # per-node class probabilities
class Loss(nn.Module):
    """Binary cross-entropy, written out explicitly."""

    def __init__(self):
        super(Loss, self).__init__()

    def forward(self, x, y):
        eps = 1e-7
        x = x.clamp(eps, 1 - eps)  # keep the logs finite
        return -torch.sum(y * torch.log(x) + (1 - y) * torch.log(1 - x))
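# Sanity check (made-up tensors): the explicit loss above should match PyTorch's
# built-in binary cross-entropy with sum reduction, up to the clamping epsilon.
def _check_loss_matches_bce():
    x = torch.rand(4, 3)                     # predicted probabilities
    y = torch.randint(0, 2, (4, 3)).float()  # binary targets
    print(Loss()(x, y))
    print(nn.BCELoss(reduction="sum")(x, y))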
def train():
    # Placeholder problem so the loop runs end to end: a random 16-node graph
    # with random features and binary targets (stand-ins, not real data).
    N, num_classes = 16, 4
    A = torch.rand(N, N)
    hidden_out = [32, 64, 64, num_classes]  # len(hidden_out) == hidden_num + 1
    model = GNN(A, 3, hidden_out)
    loss_func = Loss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    data = torch.rand(N, hidden_out[0])                # placeholder node features
    y = torch.randint(0, 2, (N, num_classes)).float()  # placeholder targets

    y_pre = model(data)
    optimizer.zero_grad()
    loss = loss_func(y_pre, y)
    loss.backward()
    optimizer.step()
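# Minimal entry point: run one optimisation step on the placeholder data above.
if __name__ == "__main__":
    train()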