Skip to content

Commit

Permalink
Merge branch 'slep' of git://github.com/lisitsyn/shogun
Browse files Browse the repository at this point in the history
  • Loading branch information
lisitsyn committed Jun 17, 2012
2 parents afe730c + d48a682 commit c3bfbbe
Show file tree
Hide file tree
Showing 2 changed files with 39 additions and 24 deletions.
@@ -0,0 +1,34 @@
from numpy import array
from numpy.random import seed, rand
from tools.load import LoadMatrix
# Shared helper used to read all three example data files.
lm=LoadMatrix()

# Train/test feature matrices and the two-class labels for the example,
# loaded from the common shogun example data directory.
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')

# One parameter set: (train features, test features, train labels).
parameter_list = [[traindat,testdat,label_traindat]]

def transfer_multitask_group_regression(fm_train=traindat,fm_test=testdat,label_train=label_traindat):
    """Train a multitask least-squares regression over two tasks.

    The training vectors are split into two equal halves, each half forming
    one Task of a TaskGroup, and a MultitaskLSRegression model is trained
    over the group.

    Parameters:
        fm_train: feature matrix used for training (columns are vectors)
        fm_test: feature matrix for testing (currently unused by this example)
        label_train: regression labels for the training vectors

    Returns:
        numpy array of predicted labels for task 0 on the training features.
    """
    from modshogun import RegressionLabels, RealFeatures, Task, TaskGroup, MultitaskLSRegression

    # BUG FIX: previously used the module-global `traindat` here, silently
    # ignoring the `fm_train` parameter.
    features = RealFeatures(fm_train)
    labels = RegressionLabels(label_train)

    # Split the vectors into two contiguous tasks: [0, n/2) and [n/2, n).
    # Integer division so Task receives int indices under Python 3 as well.
    n_vectors = features.get_num_vectors()
    task_one = Task(0,n_vectors//2)
    task_two = Task(n_vectors//2,n_vectors)
    task_group = TaskGroup()
    task_group.add_task(task_one)
    task_group.add_task(task_two)

    # 0.1 is the regularization constant for the multitask solver.
    mtlsr = MultitaskLSRegression(0.1,features,labels,task_group)
    mtlsr.train()
    # Predictions are per-task; select task 0 before applying.
    mtlsr.set_current_task(0)
    out = mtlsr.apply_regression().get_labels()
    return out

if __name__ == '__main__':
    # Run the example on the default train/test/label data.
    print('TransferMultitaskGroupRegression')
    example_args = parameter_list[0]
    transfer_multitask_group_regression(*example_args)
29 changes: 5 additions & 24 deletions src/shogun/lib/slep/slep_mt_lsr.cpp
Expand Up @@ -79,8 +79,8 @@ SGMatrix<double> slep_mt_lsr(
}

double* s = SG_CALLOC(double, n_feats*n_tasks);
double* g = SG_CALLOC(double, n_feats*n_tasks*2);
double* v = SG_CALLOC(double, n_feats*n_tasks*2);
double* g = SG_CALLOC(double, n_feats*n_tasks);
double* v = SG_CALLOC(double, n_feats*n_tasks);

double* Aw = SG_CALLOC(double, n_vecs);
for (t=0; t<n_tasks; t++)
Expand Down Expand Up @@ -165,7 +165,6 @@ SGMatrix<double> slep_mt_lsr(
for (i=0; i<n_vecs; i++)
Av[i] = Aw[i] - As[i];

// squared frobenius norm of r
double r_sum = SGVector<float64_t>::dot(v,v,n_feats*n_tasks);

double l_sum = SGVector<float64_t>::dot(Av,Av,n_vecs);
Expand All @@ -192,17 +191,17 @@ SGMatrix<double> slep_mt_lsr(
for (i=0; i<n_vecs; i++)
resid[i] = Aw[i] - y[i];

double w_norm = 0.0;
double regularizer = 0.0;
for (i=0; i<n_feats; i++)
{
double w_row_norm = 0.0;
for (t=0; t<n_tasks; t++)
w_row_norm += CMath::pow(w(i,t),options.q);
w_norm += CMath::pow(w_row_norm,1.0/options.q);
regularizer += CMath::pow(w_row_norm,1.0/options.q);
}

funcp = func;
func = 0.5*SGVector<float64_t>::dot(resid,resid,n_vecs) + lambda*w_norm;
func = 0.5*SGVector<float64_t>::dot(resid,resid,n_vecs) + lambda*regularizer;

if (gradient_break)
break;
Expand Down Expand Up @@ -251,27 +250,9 @@ SGMatrix<double> slep_mt_lsr(
done = true;
}

if (iter%options.restart_num==0)
{
alphap = 0.0;
alpha = 1.0;
L = 0.5*L;
for (i=0; i<n_feats; i++)
wp[i] = w[i];

for (i=0; i<n_vecs; i++)
Awp[i] = Aw[i];

for (i=0; i<n_feats; i++)
wwp[i] = 0.0;
}

iter++;
}

// SG_SPRINT("Iteration = %d\n", iter);
// TODO Nemirovsky, Nesterov methods

SG_FREE(ATy);
SG_FREE(wp);
SG_FREE(wwp);
Expand Down

0 comments on commit c3bfbbe

Please sign in to comment.