
Commit 93e6837

Add files via upload (1 parent: 3353f25)

File tree

5 files changed: +786 -0 lines changed


code/models/awareness.py

Lines changed: 24 additions & 0 deletions
# coding: utf-8

import torch.nn as nn


class DAN(nn.Module):
    """Distance awareness network."""

    def __init__(self, total_locs):
        super(DAN, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(in_features=total_locs, out_features=total_locs),
            nn.Sigmoid(),
        )
        self.weight_init()

    def forward(self, x):
        weights = self.net(x)
        return weights

    def weight_init(self):
        for param in self.parameters():
            param.data.uniform_(-0.05, 0.05)
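
A minimal smoke test for DAN, as a sketch only: the nn.Linear above fixes the input width to total_locs, so the input is read here as a batch of per-location feature vectors. The value 8606 is borrowed from the discriminator defaults below and is purely illustrative.

import torch

dan = DAN(total_locs=8606)
feats = torch.rand(32, 8606)  # hypothetical batch of per-location features
weights = dan(feats)          # (32, 8606), each weight squashed into (0, 1)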

code/models/discriminator.py

Lines changed: 106 additions & 0 deletions
# coding: utf-8

import torch
import torch.nn as nn
import torch.nn.functional as F


class Discriminator(nn.Module):
    """Basic discriminator."""

    def __init__(
            self,
            total_locations=8606,
            embedding_net=None,
            embedding_dim=64,
            dropout=0.6):
        super(Discriminator, self).__init__()
        num_filters = [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160]
        filter_sizes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20]
        if embedding_net:
            self.embedding = embedding_net
        else:
            self.embedding = nn.Embedding(
                num_embeddings=total_locations,
                embedding_dim=embedding_dim)
        self.convs = nn.ModuleList([nn.Conv2d(1, n, (f, embedding_dim)) for (
            n, f) in zip(num_filters, filter_sizes)])
        self.highway = nn.Linear(sum(num_filters), sum(num_filters))
        self.dropout = nn.Dropout(p=dropout)
        self.linear = nn.Linear(sum(num_filters), 2)
        self.init_parameters()

    def forward(self, x):
        """
        Args:
            x: (batch_size * seq_len)
        """
        emb = self.embedding(x).unsqueeze(
            1)  # batch_size * 1 * seq_len * emb_dim
        # [batch_size * num_filter * length]
        convs = [F.relu(conv(emb)).squeeze(3) for conv in self.convs]
        pools = [F.max_pool1d(conv, conv.size(2)).squeeze(2)
                 for conv in convs]  # [batch_size * num_filter]
        pred = torch.cat(pools, 1)  # batch_size * num_filters_sum
        highway = self.highway(pred)
        # highway layer: sigmoid(h) gates between the transform and the input
        pred = torch.sigmoid(highway) * F.relu(highway) + \
            (1. - torch.sigmoid(highway)) * pred
        pred = F.log_softmax(self.linear(self.dropout(pred)), dim=-1)
        return pred

    def init_parameters(self):
        for param in self.parameters():
            param.data.uniform_(-0.05, 0.05)


class TCDiscriminator(nn.Module):
    """Discriminator over paired time and location sequences."""

    def __init__(self,
                 total_locations=8606,
                 embedding_net=None,
                 sembedding_dim=64,
                 tembedding_dim=16,
                 dropout=0.6):
        super(TCDiscriminator, self).__init__()
        num_filters = [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160]
        filter_sizes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20]
        if embedding_net:
            self.tembedding = embedding_net[0]
            self.sembedding = embedding_net[1]
        else:
            self.tembedding = nn.Embedding(
                num_embeddings=total_locations,
                embedding_dim=tembedding_dim)
            self.sembedding = nn.Embedding(
                num_embeddings=total_locations,
                embedding_dim=sembedding_dim)
        self.convs = nn.ModuleList([nn.Conv2d(1, n, (f, tembedding_dim + sembedding_dim)) for (
            n, f) in zip(num_filters, filter_sizes)])
        self.highway = nn.Linear(sum(num_filters), sum(num_filters))
        self.dropout = nn.Dropout(p=dropout)
        self.linear = nn.Linear(sum(num_filters), 2)
        self.init_parameters()

    def forward(self, xt, xs):
        """
        Args:
            xt: time sequence, (batch_size * seq_len)
            xs: location sequence, (batch_size * seq_len)
        """
        temb = self.tembedding(xt)
        semb = self.sembedding(xs)
        # concatenate time and location embeddings along the feature axis
        emb = torch.cat([temb, semb], dim=-1).unsqueeze(1)
        # [batch_size * num_filter * length]
        convs = [F.relu(conv(emb)).squeeze(3) for conv in self.convs]
        pools = [F.max_pool1d(conv, conv.size(2)).squeeze(2)
                 for conv in convs]  # [batch_size * num_filter]
        pred = torch.cat(pools, 1)  # batch_size * num_filters_sum
        highway = self.highway(pred)
        pred = torch.sigmoid(highway) * F.relu(highway) + \
            (1. - torch.sigmoid(highway)) * pred
        pred = F.log_softmax(self.linear(self.dropout(pred)), dim=-1)
        return pred

    def init_parameters(self):
        for param in self.parameters():
            param.data.uniform_(-0.05, 0.05)
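
A hedged smoke test for Discriminator, with shapes taken from the code above. Note that the largest convolution filter spans 20 steps, so input sequences shorter than 20 would produce an empty feature map.

import torch

disc = Discriminator(total_locations=8606)
x = torch.randint(0, 8606, (4, 24))  # 4 sequences of 24 location ids
out = disc(x)                        # (4, 2) log-probabilities over two classes

TCDiscriminator works the same way but takes a pair (xt, xs) of time and location index sequences of equal length.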

code/models/gan_loss.py

Lines changed: 132 additions & 0 deletions
# coding: utf-8
import torch
import numpy as np
import torch.nn as nn


class GANLoss(nn.Module):
    """Reward-refined NLLLoss for adversarial training of the generator."""

    def __init__(self):
        super(GANLoss, self).__init__()

    def forward(self, prob, target, reward, device, ploss=False):
        """
        Args:
            prob: (N, C), torch tensor of log-probabilities
            target: (N,), torch tensor of chosen action indices
            reward: (N,), torch tensor of per-sample rewards
        """
        N = target.size(0)
        C = prob.size(1)
        one_hot = torch.zeros((N, C))
        if prob.is_cuda:
            one_hot = one_hot.to(device)
        one_hot.scatter_(1, target.data.view((-1, 1)), 1)
        # masked_select needs a boolean mask; the original cast through
        # torch.ByteTensor is deprecated and silently moved the mask to CPU
        one_hot = one_hot.bool()
        # pick the log-prob of each taken action and weight it by its reward
        loss = torch.masked_select(prob, one_hot)
        loss = loss * reward
        loss = -torch.sum(loss)
        return loss
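
A toy check, with made-up numbers, of what GANLoss computes: the negative reward-weighted sum of the log-probabilities of the chosen actions, i.e. a REINFORCE-style policy-gradient objective.

import torch
import torch.nn.functional as F

criterion = GANLoss()
log_probs = F.log_softmax(torch.randn(6, 8606), dim=-1)  # generator outputs
actions = torch.randint(0, 8606, (6,))                   # sampled locations
rewards = torch.rand(6)                                  # e.g. rollout scores
loss = criterion(log_probs, actions, rewards, device='cpu')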

class distance_loss(nn.Module):
    """Penalizes large jumps between consecutive generated locations."""

    def __init__(self, datasets, device):
        super(distance_loss, self).__init__()
        # each line of the gps file holds the "x y" coordinates of one location
        if datasets == 'mobile':
            with open('../data/mobile/gps') as f:
                gpss = f.readlines()
        else:
            with open('../data/geolife/gps') as f:
                gpss = f.readlines()
        self.X = []
        self.Y = []
        for gps in gpss:
            x, y = float(gps.split()[0]), float(gps.split()[1])
            self.X.append(x)
            self.Y.append(y)
        self.X = torch.Tensor(np.array(self.X)).float()
        self.Y = torch.Tensor(np.array(self.Y)).float()
        self.X = self.X.to(device)
        self.Y = self.Y.to(device)

    def forward(self, x):
        """
        :param x: generated sequence, batch_size * seq_len
        :return: mean squared distance between consecutive locations
        """
        x = x.long()
        # coordinates at steps t and t+1 for every position in the batch
        x1 = torch.index_select(self.X, 0, x[:, :-1].contiguous().view(-1))
        x2 = torch.index_select(self.X, 0, x[:, 1:].contiguous().view(-1))
        y1 = torch.index_select(self.Y, 0, x[:, :-1].contiguous().view(-1))
        y2 = torch.index_select(self.Y, 0, x[:, 1:].contiguous().view(-1))
        dx = x1 - x2
        dy = y1 - y2
        loss = dx**2 + dy**2
        loss = torch.sum(loss) / loss.size(0)
        return loss
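
Usage depends on a gps file at ../data/mobile/gps or ../data/geolife/gps holding one "x y" coordinate pair per line (inferred from the parsing above). Assuming such a file exists and lists at least 8606 locations:

import torch

dloss = distance_loss(datasets='geolife', device='cpu')
seqs = torch.randint(0, 8606, (4, 48))  # generated location-id sequences
penalty = dloss(seqs)  # mean squared jump length over consecutive steps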

class period_loss(nn.Module):
    """Counts positions whose location differs from one period earlier."""

    def __init__(self, time_interval):
        super(period_loss, self).__init__()
        self.time_interval = time_interval
        self.mse = nn.MSELoss()  # defined in the original but unused below

    def forward(self, x):
        """
        :param x: generated sequence, batch_size * seq_len
        :return: number of positions where x[:, i] != x[:, i + time_interval]
        """
        loss = 0.
        for i in range(0, x.size(1) - self.time_interval):
            loss += torch.sum(torch.ne(x[:, i], x[:, i + self.time_interval]))
        return loss
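
For example, with hourly sequences and time_interval=24, period_loss counts how often a sequence breaks its daily routine; a perfectly periodic sequence scores zero:

import torch

ploss = period_loss(time_interval=24)
x = torch.randint(0, 10, (2, 48))         # 2 random 48-step sequences
periodic = torch.cat([x[:, :24]] * 2, 1)  # repeat the first day exactly
print(ploss(x), ploss(periodic))          # second value is zero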

class embd_distance_loss(nn.Module):
    """Distance loss computed in embedding space between consecutive steps."""

    def __init__(self, embd):
        super(embd_distance_loss, self).__init__()
        self.embd = embd

    def forward(self, x, embd_size):
        """
        :param x: generated sequence, batch_size * seq_len
        :return: summed MSE between embeddings of consecutive locations
        """
        emb = self.embd(x)
        emb = emb.permute(1, 0, 2)  # seq_len * batch_size * embd_size
        # nxt is curr shifted by one step (renamed from `next`, which
        # shadowed the Python builtin)
        curr = emb[: x.size(1) - 1].contiguous().view(-1, embd_size)
        nxt = emb[1: x.size(1)].contiguous().view(-1, embd_size)
        loss = torch.nn.functional.mse_loss(curr, nxt, reduction='sum')
        return loss


class embd_period_loss(nn.Module):
    """Period loss in embedding space between steps 24 hours apart."""

    def __init__(self, embd):
        super(embd_period_loss, self).__init__()
        self.embd = embd

    def forward(self, x, embd_size):
        """
        :param x: generated sequence, batch_size * seq_len
        :return: summed MSE between embeddings 24 steps apart
        """
        emb = self.embd(x)
        emb = emb.permute(1, 0, 2)  # seq_len * batch_size * embd_size
        # the two slices match in size only when seq_len == 48
        curr = emb[: 24].contiguous().view(-1, embd_size)
        nxt = emb[24:].contiguous().view(-1, embd_size)
        loss = torch.nn.functional.mse_loss(curr, nxt, reduction='sum')
        return loss
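
A minimal sketch wiring both embedding-space losses to a shared nn.Embedding. The fixed 24-step slicing in embd_period_loss only lines up when seq_len is 48 (e.g. two days of hourly steps), so that length is used here:

import torch
import torch.nn as nn

embd = nn.Embedding(num_embeddings=8606, embedding_dim=64)
x = torch.randint(0, 8606, (4, 48))

d_loss = embd_distance_loss(embd)(x, embd_size=64)  # consecutive-step MSE
p_loss = embd_period_loss(embd)(x, embd_size=64)    # day-apart MSE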
