-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy path: reduced_rank_regressor.py
52 lines (43 loc) · 1.58 KB
/
reduced_rank_regressor.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
"""
Reduced rank regression class.
Requires scipy to be installed.
Implemented by Chris Rayner (2015)
dchrisrayner AT gmail DOT com
Optimal linear 'bottlenecking' or 'multitask learning'.
"""
import numpy as np
from scipy import sparse
def ideal_data(num, dimX, dimY, rrank, noise=1):
    """Generate random low-rank regression data.

    X is num-by-dimX standard normal. Y = X W + e, where W is the product
    of two thin Gaussian factors (so rank(W) <= rrank) and e is Gaussian
    noise scaled by `noise`.
    """
    X = np.random.randn(num, dimX)
    # Build a rank-limited coefficient matrix from two thin factors.
    left_factor = np.random.randn(dimX, rrank)
    right_factor = np.random.randn(rrank, dimY)
    W = np.dot(left_factor, right_factor)
    Y = np.dot(X, W) + noise * np.random.randn(num, dimY)
    return X, Y
class ReducedRankRegressor(object):
"""
Reduced Rank Regressor (linear 'bottlenecking' or 'multitask learning')
- X is an n-by-d matrix of features.
- Y is an n-by-D matrix of targets.
- rrank is a rank constraint.
- reg is a regularization parameter (optional).
"""
def __init__(self, X, Y, rank, reg=None):
if np.size(np.shape(X)) == 1:
X = np.reshape(X, (-1, 1))
if np.size(np.shape(Y)) == 1:
Y = np.reshape(Y, (-1, 1))
if reg is None:
reg = 0
self.rank = rank
CXX = np.dot(X.T, X) + reg * sparse.eye(np.size(X, 1))
CXY = np.dot(X.T, Y)
_U, _S, V = np.linalg.svd(np.dot(CXY.T, np.dot(np.linalg.pinv(CXX), CXY)))
self.W = V[0:rank, :].T
self.A = np.dot(np.linalg.pinv(CXX), np.dot(CXY, self.W)).T
def __str__(self):
return 'Reduced Rank Regressor (rank = {})'.format(self.rank)
def predict(self, X):
"""Predict Y from X."""
if np.size(np.shape(X)) == 1:
X = np.reshape(X, (-1, 1))
return np.dot(X, np.dot(self.A.T, self.W.T))