Models.py
import torch
import torch.nn as nn
import torch.nn.functional as F
# This source file is based on the NGCF framework published by Xiang Wang et al.
# We would like to thank the authors and offer our appreciation to them.
# The original algorithm can be found in the paper:
# Neural Graph Collaborative Filtering, SIGIR 2019.


class NGCF(nn.Module):
    def __init__(self, n_users, n_items, embedding_dim, weight_size, dropout_list):
        super().__init__()
        self.n_users = n_users
        self.n_items = n_items
        self.embedding_dim = embedding_dim
        self.weight_size = weight_size
        self.n_layers = len(self.weight_size)

        self.dropout_list = nn.ModuleList()
        self.GC_Linear_list = nn.ModuleList()  # transforms the aggregated neighbor messages
        self.Bi_Linear_list = nn.ModuleList()  # transforms the element-wise interaction term

        # Prepend the input dimension so that layer i maps
        # weight_size[i] -> weight_size[i + 1].
        self.weight_size = [self.embedding_dim] + self.weight_size
        for i in range(self.n_layers):
            self.GC_Linear_list.append(nn.Linear(self.weight_size[i], self.weight_size[i + 1]))
            self.Bi_Linear_list.append(nn.Linear(self.weight_size[i], self.weight_size[i + 1]))
            self.dropout_list.append(nn.Dropout(dropout_list[i]))

        self.user_embedding = nn.Embedding(n_users, embedding_dim)
        self.item_embedding = nn.Embedding(n_items, embedding_dim)
        self._init_weight_()

    def _init_weight_(self):
        nn.init.xavier_uniform_(self.user_embedding.weight)
        nn.init.xavier_uniform_(self.item_embedding.weight)
    def forward(self, adj):
        # `adj` is the (n_users + n_items) x (n_users + n_items) normalized
        # sparse adjacency of the user-item interaction graph.
        ego_embeddings = torch.cat((self.user_embedding.weight, self.item_embedding.weight), dim=0)
        all_embeddings = [ego_embeddings]
        for i in range(self.n_layers):
            # Aggregate messages from neighbors via the normalized adjacency.
            side_embeddings = torch.sparse.mm(adj, ego_embeddings)
            sum_embeddings = F.leaky_relu(self.GC_Linear_list[i](side_embeddings))
            # Element-wise (Hadamard) interaction between each node and its
            # aggregated neighborhood.
            bi_embeddings = torch.mul(ego_embeddings, side_embeddings)
            bi_embeddings = F.leaky_relu(self.Bi_Linear_list[i](bi_embeddings))
            # Removing bi_embeddings here drops the inner-product interaction
            # between e_u and e_i (ablation on the commented line below).
            ego_embeddings = sum_embeddings + bi_embeddings
            # ego_embeddings = sum_embeddings
            ego_embeddings = self.dropout_list[i](ego_embeddings)
            norm_embeddings = F.normalize(ego_embeddings, p=2, dim=1)
            all_embeddings += [norm_embeddings]
        # Concatenate the representations from every propagation layer
        # (including the initial embeddings).
        all_embeddings = torch.cat(all_embeddings, dim=1)
        u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0)
        return u_g_embeddings, i_g_embeddings
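

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): a minimal, self-contained
# example of driving NGCF. It assumes `adj` should be the symmetrically
# normalized adjacency D^{-1/2} (A + I) D^{-1/2} over the joint user/item
# node set; the toy interactions, sizes, and hyperparameters below are
# made-up placeholders, not values from the GRec pipeline.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    n_users, n_items = 4, 6
    n_nodes = n_users + n_items

    # Toy (user index, item index) interactions.
    interactions = [(0, 0), (0, 2), (1, 1), (2, 3), (3, 5)]

    # Bipartite adjacency A: items are offset by n_users in the node indexing.
    A = torch.zeros(n_nodes, n_nodes)
    for u, i in interactions:
        A[u, n_users + i] = 1.0
        A[n_users + i, u] = 1.0

    # Symmetric normalization with self-loops: D^{-1/2} (A + I) D^{-1/2}.
    A_hat = A + torch.eye(n_nodes)
    d_inv_sqrt = A_hat.sum(dim=1).pow(-0.5)
    norm_adj = d_inv_sqrt.unsqueeze(1) * A_hat * d_inv_sqrt.unsqueeze(0)
    adj = norm_adj.to_sparse()  # forward() multiplies with torch.sparse.mm

    model = NGCF(n_users, n_items, embedding_dim=8,
                 weight_size=[8, 8], dropout_list=[0.1, 0.1])
    model.eval()  # disable dropout for a deterministic pass
    with torch.no_grad():
        u_emb, i_emb = model(adj)

    # Preference scores are inner products of the layer-concatenated
    # embeddings: (4, 24) @ (24, 6) -> (4, 6).
    scores = u_emb @ i_emb.t()
    print(u_emb.shape, i_emb.shape, scores.shape)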