Updated TaskGroup, added train routines for MultitaskLS regression and fixed some bugs in SLEP ported code
lisitsyn committed Jun 16, 2012
1 parent 30c471d commit f621fdf
Showing 14 changed files with 263 additions and 338 deletions.
1 change: 1 addition & 0 deletions examples/undocumented/libshogun/Makefile
@@ -80,6 +80,7 @@ TARGETS = basic_minimal \
library_cover_tree \
kernel_machine_train_locked \
statistics \
transfer_multitasklsregression

all: $(TARGETS)

50 changes: 50 additions & 0 deletions examples/undocumented/libshogun/transfer_multitasklsregression.cpp
@@ -0,0 +1,50 @@
#include <shogun/labels/RegressionLabels.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/transfer/multitask/Task.h>
#include <shogun/transfer/multitask/TaskGroup.h>
#include <shogun/transfer/multitask/MultitaskLSRegression.h>
#include <shogun/base/init.h>
#include <shogun/lib/common.h>
#include <shogun/io/SGIO.h>

using namespace shogun;

void print_message(FILE* target, const char* str)
{
fprintf(target, "%s", str);
}

int main(int argc, char** argv)
{
init_shogun(&print_message);

// create some data
SGMatrix<float64_t> matrix(2,4);
for (int32_t i=0; i<2*4; i++)
matrix.matrix[i]=i;

CDenseFeatures<float64_t>* features= new CDenseFeatures<float64_t>(matrix);

// create four labels, one per example
CRegressionLabels* labels=new CRegressionLabels(4);
labels->set_label(0, -1);
labels->set_label(1, +1);
labels->set_label(2, -1);
labels->set_label(3, +1);

CTask* first_task = new CTask(0,2);
CTask* second_task = new CTask(2,4);

CTaskGroup* task_group = new CTaskGroup();
task_group->add_task(first_task);
task_group->add_task(second_task);

CMultitaskLSRegression* regressor = new CMultitaskLSRegression(0.5,features,labels,task_group);
regressor->train();

regressor->set_current_task(0);
regressor->get_w().display_vector();
SG_UNREF(regressor);
exit_shogun();
return 0;
}
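A note on the task convention the example relies on (my reading, consistent with the options.ind fix in slep_mt_lsr.cpp below): CTask(start, end) appears to cover the half-open range [start, end) of example indices, so the two tasks above partition the four examples into {0, 1} and {2, 3}. Under that assumption, the SLEP solver would see a cumulative boundary array along these lines (an illustrative sketch, not the actual CTaskGroup internals):

// Hypothetical illustration only: how CTask(0,2) and CTask(2,4) would map
// to the cumulative index array consumed by slep_mt_lsr; the real
// conversion happens inside the library.
int32_t ind[3] = {0, 2, 4}; // task t spans examples ind[t] .. ind[t+1]-1
// task 0 -> examples 0,1; task 1 -> examples 2,3; n_nodes = 2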
53 changes: 31 additions & 22 deletions src/shogun/lib/slep/slep_mt_lsr.cpp
@@ -28,7 +28,7 @@ SGMatrix<double> slep_mt_lsr(
double lambda, lambda_max, beta;
double funcp = 0.0, func = 0.0;

int n_tasks;
int n_tasks = options.n_nodes;

int iter = 1;
bool done = false;
@@ -37,47 +37,55 @@
double* ATy = SG_CALLOC(double, n_feats*n_tasks);
for (t=0; t<n_tasks; t++)
{
int task_ind_start = options.ind[t]+1;
int task_ind_start = options.ind[t];
int task_ind_end = options.ind[t+1];
for (i=task_ind_start; i<task_ind_end; i++)
features->add_to_dense_vec(y[i],i,ATy+t*n_feats,n_feats);
}

if (options.regularization!=0)
{
/*
if (options.general)
lambda_max = findLambdaMax_mt(ATy, n_vecs, n_tasks, options.ind, options.n_nodes);
else
lambda_max = general_findLambdaMax_mt(ATy, n_vecs, n_tasks, options.G,
options.ind, options.n_nodes);
*/
if (z<0 || z>1)
SG_SERROR("z is not in range [0,1]");

double q_bar = 0.0;
if (options.q==1)
q_bar = CMath::ALMOST_INFTY;
else if (options.q>=1e6)
q_bar = 1;
else
q_bar = options.q/(options.q-1);
lambda_max = 0.0;

for (t=0; t<n_tasks; t++)
{
lambda_max =
CMath::max(lambda_max,
SGVector<float64_t>::qnorm(ATy+t*n_feats, n_feats, q_bar));
}

lambda = z*lambda_max;
}
else
lambda = z;

SGMatrix<double> w(n_feats,n_tasks);
w.zero();
if (options.initial_w)
{
for (j=0; j<n_tasks; j++)
for (i=0; i<n_feats; i++)
w(i,j) = options.initial_w[j*n_feats+i];
}
else
{
for (j=0; j<n_tasks*n_feats; j++)
w[j] = 0.0;
}

double* s = SG_CALLOC(double, n_feats*n_tasks);
double* g = SG_CALLOC(double, n_feats*n_tasks);
double* v = SG_CALLOC(double, n_feats*n_tasks);
double* g = SG_CALLOC(double, n_feats*n_tasks*2);
double* v = SG_CALLOC(double, n_feats*n_tasks*2);

double* Aw = SG_CALLOC(double, n_vecs);
for (t=0; t<n_tasks; t++)
{
int task_ind_start = options.ind[t]+1;
int task_ind_start = options.ind[t];
int task_ind_end = options.ind[t+1];
for (i=task_ind_start; i<task_ind_end; i++)
Aw[i] = features->dense_dot(i,w.matrix+t*n_feats,n_feats);
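Context for the new regularization-path block above (my summary of the code, not text from the commit): it replaces the commented-out findLambdaMax helpers with an explicit computation of the smallest lambda for which the all-zero solution is optimal, scaled by z in [0,1]. With q_bar the dual exponent of q (1/q + 1/q_bar = 1, so q_bar is infinite for q = 1 and tends to 1 as q grows large), the loop computes

lambda_max = max over tasks t of || ATy_t ||_{q_bar}, then lambda = z * lambda_max

where ATy_t is the t-th n_feats-long block of ATy, i.e. A^T y restricted to task t's examples as accumulated in the loop near the top of the function.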
@@ -118,7 +126,7 @@ SGMatrix<double> slep_mt_lsr(

for (t=0; t<n_tasks; t++)
{
int task_ind_start = options.ind[t]+1;
int task_ind_start = options.ind[t];
int task_ind_end = options.ind[t+1];
for (i=task_ind_start; i<task_ind_end; i++)
features->add_to_dense_vec(As[i],i,ATAs+t*n_feats,n_feats);
@@ -131,7 +139,7 @@
for (i=0; i<n_feats*n_tasks; i++)
wp[i] = w[i];

for (i=0; i<n_vecs*n_tasks; i++)
for (i=0; i<n_vecs; i++)
Awp[i] = Aw[i];

while (true)
@@ -140,15 +148,15 @@
for (i=0; i<n_feats*n_tasks; i++)
v[i] = s[i] - g[i]*(1.0/L);

eppMatrix(w.matrix, v, n_vecs, n_tasks, lambda/L, options.q);
eppMatrix(w.matrix, v, n_feats, n_tasks, lambda/L, options.q);

// v = w - s
for (i=0; i<n_feats*n_tasks; i++)
v[i] = w[i] - s[i];

for (t=0; t<n_tasks; t++)
{
int task_ind_start = options.ind[t]+1;
int task_ind_start = options.ind[t];
int task_ind_end = options.ind[t+1];
for (i=task_ind_start; i<task_ind_end; i++)
Aw[i] = features->dense_dot(i,w.matrix+t*n_feats,n_feats);
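Context for the n_vecs -> n_feats fix in the eppMatrix call above (my summary of the SLEP projection, not text from the commit): eppMatrix evaluates the proximal operator of the mixed-norm penalty row-wise over the n_feats x n_tasks weight matrix, so the row count passed to it must be n_feats:

W = argmin over W' of (1/2) * ||W' - V||_F^2 + (lambda/L) * sum_i ||W'_{i,:}||_q

with V = s - g*(1/L), the gradient step computed just before the call. Passing n_vecs would project a matrix of the wrong shape whenever the number of examples differs from the number of features.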
@@ -185,10 +193,10 @@ SGMatrix<double> slep_mt_lsr(
resid[i] = Aw[i] - y[i];

double w_norm = 0.0;
for (i=0; i<n_vecs; i++)
for (i=0; i<n_feats; i++)
{
double w_row_norm = 0.0;
for (t=0; t<n_tasks; i++)
for (t=0; t<n_tasks; t++)
w_row_norm += CMath::pow(w(i,t),options.q);
w_norm += CMath::pow(w_row_norm,1.0/options.q);
}
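The corrected loop above (t++ instead of i++ in the inner loop, and n_feats rows instead of n_vecs) evaluates the l1/lq mixed norm of W that enters the tracked objective; as a formula (my reading of the code, with the least-squares term inferred from the resid computation):

Omega(W) = sum over rows i of ( sum over tasks t of |W(i,t)|^q )^(1/q)
func = (1/2) * sum_i (Aw[i] - y[i])^2 + lambda * Omega(W)

Note the code raises the raw entry w(i,t) to the q-th power without taking an absolute value, which matches this formula only for even q such as the default q = 2.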
@@ -261,6 +269,7 @@ SGMatrix<double> slep_mt_lsr(
iter++;
}

// SG_SPRINT("Iteration = %d\n", iter);
// TODO Nemirovsky, Nesterov methods

SG_FREE(ATy);
28 changes: 20 additions & 8 deletions src/shogun/lib/slep/slep_options.h
@@ -13,6 +13,8 @@

#define IGNORE_IN_CLASSLIST

#include <stdlib.h>

namespace shogun
{

@@ -26,18 +28,28 @@ IGNORE_IN_CLASSLIST struct slep_options
int restart_num;
int n_nodes;
int regularization;
double* ind;
int* ind;
double* ind_t;
double* G;
double* initial_w;
double q;

static bool get_default_general() { return false; }
static int get_default_termination() { return 2; }
static double get_default_tolerance() { return 1e-3; }
static int get_default_max_iter() { return 1000; }
static int get_default_restart_num() { return 100; }
static int get_default_regularization() { return 0; }
static double get_default_q() { return 2.0; }
static slep_options default_options()
{
slep_options opts;
opts.general = false;
opts.termination = 2;
opts.tolerance = 1e-3;
opts.max_iter = 1000;
opts.restart_num = 100;
opts.regularization = 0;
opts.q = 2.0;
opts.initial_w = NULL;
opts.ind = NULL;
opts.ind_t = NULL;
opts.G = NULL;
return opts;
}
};
#endif
}
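The per-field static getters are replaced by a single default_options() factory, which also zero-initializes the pointer members. A minimal caller sketch, assuming slep_mt_lsr takes (features, y, z, options) in that order (the full signature is collapsed in this diff, so the argument order is an assumption):

// Hypothetical usage; the argument order of slep_mt_lsr is assumed,
// not confirmed by the visible part of the diff.
slep_options options = slep_options::default_options();
options.n_nodes = 2;       // number of tasks
options.ind = task_ind;    // cumulative task boundaries, length n_nodes+1
options.q = 2.0;           // mixed-norm exponent (the default)
SGMatrix<double> w = slep_mt_lsr(features, y, z, options);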
