Clean up CMath: move respective functions to CStatistics, SGVector and SGMatrix
Soeren Sonnenburg committed Jun 8, 2012
1 parent f54eb89 commit abe2a85
Showing 81 changed files with 2,336 additions and 1,444 deletions.
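The pattern in the hunks below is mechanical: CMath's static helpers on raw arrays move to the templated container classes, so CMath::dot, CMath::fill_vector, CMath::scale_vector and friends become SGVector<T>::..., while the matrix-shaped helpers (trace, pinv, transpose_matrix, create_diagonal_matrix, display_matrix) become SGMatrix<T>::.... A minimal sketch of a migrated call site; the signatures are taken verbatim from the hunks in this commit, while the include paths and the init/exit pair are the usual shogun boilerplate assumed here, not something this commit introduces:

#include <shogun/base/init.h>
#include <shogun/lib/common.h>
#include <shogun/io/SGIO.h>
#include <shogun/lib/SGVector.h>

using namespace shogun;

int main()
{
	init_shogun_with_defaults();

	float64_t w[4];
	SGVector<float64_t>::fill_vector(w, 4, 1.0);        // was CMath::fill_vector
	SGVector<float64_t>::scale_vector(0.5, w, 4);       // was CMath::scale_vector
	float64_t wnorm=SGVector<float64_t>::dot(w, w, 4);  // was CMath::dot
	SG_SPRINT("norm^2: %f\n", wnorm);

	exit_shogun();
	return 0;
}

Scalar helpers such as CMath::sq and CMath::clamp stay in CMath (see the QPBSVMLib and SVMOcas hunks); only the array- and matrix-valued routines move.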
2 changes: 2 additions & 0 deletions src/shogun/base/Parameter.h
@@ -13,6 +13,8 @@
#include <shogun/lib/common.h>
#include <shogun/io/SGIO.h>
#include <shogun/lib/DataType.h>
+ #include <shogun/lib/SGVector.h>
+ #include <shogun/lib/SGMatrix.h>
#include <shogun/io/SerializableFile.h>
#include <shogun/base/DynArray.h>

16 changes: 8 additions & 8 deletions src/shogun/classifier/LDA.cpp
@@ -154,7 +154,7 @@ bool CLDA::train_machine(CFeatures* data)
1.0/(train_labels.vlen-1), buffer, nf, buffer, nf,
1.0/(train_labels.vlen-1), scatter, nf);

- float64_t trace=CMath::trace((float64_t*) scatter, num_feat, num_feat);
+ float64_t trace=SGMatrix<float64_t>::trace((float64_t*) scatter, num_feat, num_feat);

double s=1.0-m_gamma; /* calling external lib; indirectly */
for (i=0; i<num_feat*num_feat; i++)
@@ -163,7 +163,7 @@ bool CLDA::train_machine(CFeatures* data)
for (i=0; i<num_feat; i++)
scatter[i*num_feat+i]+= trace*m_gamma/num_feat;

- double* inv_scatter= (double*) CMath::pinv(
+ double* inv_scatter= (double*) SGMatrix<float64_t>::pinv(
scatter, num_feat, num_feat, NULL);

float64_t* w_pos=buffer;
@@ -174,17 +174,17 @@ bool CLDA::train_machine(CFeatures* data)
cblas_dsymv(CblasColMajor, CblasUpper, nf, 1.0, inv_scatter, nf,
(double*) mean_neg, 1, 0, (double*) w_neg, 1);

- bias=0.5*(CMath::dot(w_neg, mean_neg, num_feat)-CMath::dot(w_pos, mean_pos, num_feat));
+ bias=0.5*(SGVector<float64_t>::dot(w_neg, mean_neg, num_feat)-SGVector<float64_t>::dot(w_pos, mean_pos, num_feat));
for (i=0; i<num_feat; i++)
w.vector[i]=w_pos[i]-w_neg[i];

#ifdef DEBUG_LDA
SG_PRINT("bias: %f\n", bias);
- CMath::display_vector(w.vector, num_feat, "w");
- CMath::display_vector(w_pos, num_feat, "w_pos");
- CMath::display_vector(w_neg, num_feat, "w_neg");
- CMath::display_vector(mean_pos, num_feat, "mean_pos");
- CMath::display_vector(mean_neg, num_feat, "mean_neg");
+ SGVector<float64_t>::display_vector(w.vector, num_feat, "w");
+ SGVector<float64_t>::display_vector(w_pos, num_feat, "w_pos");
+ SGVector<float64_t>::display_vector(w_neg, num_feat, "w_neg");
+ SGVector<float64_t>::display_vector(mean_pos, num_feat, "mean_pos");
+ SGVector<float64_t>::display_vector(mean_neg, num_feat, "mean_neg");
#endif

SG_FREE(mean_neg);
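The LDA change above is representative of the SGMatrix side: the shrinkage-regularised within-class scatter uses trace() for the diagonal loading and pinv() for the inversion, both now SGMatrix<float64_t> statics. A toy version of that step, assuming a LAPACK-enabled build and that pinv() with a NULL target returns a freshly allocated matrix owned by the caller, which is how the call site above uses it:

#include <shogun/base/init.h>
#include <shogun/lib/common.h>
#include <shogun/lib/SGVector.h>
#include <shogun/lib/SGMatrix.h>

using namespace shogun;

int main()
{
	init_shogun_with_defaults();

	int32_t nf=2;
	float64_t gamma=0.1;
	float64_t scatter[4]={2.0, 0.3, 0.3, 1.0};                 // symmetric 2x2, column-major

	float64_t tr=SGMatrix<float64_t>::trace(scatter, nf, nf);  // was CMath::trace
	for (int32_t i=0; i<nf*nf; i++)
		scatter[i]*=1.0-gamma;                                 // shrink the whole matrix
	for (int32_t i=0; i<nf; i++)
		scatter[i*nf+i]+=tr*gamma/nf;                          // diagonal loading

	float64_t* inv=SGMatrix<float64_t>::pinv(scatter, nf, nf, NULL);  // was CMath::pinv
	SGVector<float64_t>::display_vector(inv, nf*nf, "pinv(scatter)"); // was CMath::display_vector
	SG_FREE(inv);

	exit_shogun();
	return 0;
}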
4 changes: 2 additions & 2 deletions src/shogun/classifier/NearestCentroid.cpp
@@ -84,7 +84,7 @@ namespace shogun{
int32_t current_class = ((CMulticlassLabels*) m_labels)->get_label(idx);
float64_t* target = centroids.matrix + num_feats*current_class;
float64_t* current = ((CDenseFeatures<float64_t>*)data)->get_feature_vector(idx,current_len,current_free);
- CMath::add(target,1.0,target,1.0,current,current_len);
+ SGVector<float64_t>::add(target,1.0,target,1.0,current,current_len);
num_per_class[current_class]++;
((CDenseFeatures<float64_t>*)data)->free_feature_vector(current, current_len, current_free);
}
@@ -100,7 +100,7 @@ namespace shogun{
else
scale = 1.0/(float64_t)total;

- CMath::scale_vector(scale,target,num_feats);
+ SGVector<float64_t>::scale_vector(scale,target,num_feats);
}

m_centroids->free_feature_matrix();
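The centroid computation above is a running sum followed by a 1/N rescale; only the class hosting the helpers changes. A reduced sketch with hypothetical data (the add() overload used here computes target = alpha*vec1 + beta*vec2, as in the hunk):

#include <shogun/lib/common.h>
#include <shogun/lib/SGVector.h>

using namespace shogun;

int main()
{
	float64_t target[3]={0, 0, 0};   // centroid accumulator
	float64_t x1[3]={1, 2, 3};       // hypothetical feature vectors
	float64_t x2[3]={3, 2, 1};

	// target = 1.0*target + 1.0*x   (was CMath::add)
	SGVector<float64_t>::add(target, 1.0, target, 1.0, x1, 3);
	SGVector<float64_t>::add(target, 1.0, target, 1.0, x2, 3);

	// rescale by the number of summed vectors (was CMath::scale_vector)
	SGVector<float64_t>::scale_vector(1.0/2.0, target, 3);
	return 0;
}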
10 changes: 5 additions & 5 deletions src/shogun/classifier/mkl/MKL.cpp
@@ -223,7 +223,7 @@ bool CMKL::train_machine(CFeatures* data)
int32_t num_kernels = kernel->get_num_subkernels();
SG_INFO("num_kernels = %d\n", num_kernels);
const float64_t* beta_const = kernel->get_subkernel_weights(num_weights);
- float64_t* beta = CMath::clone_vector(beta_const, num_weights);
+ float64_t* beta = SGVector<float64_t>::clone_vector(beta_const, num_weights);
ASSERT(num_weights==num_kernels);

if (get_solver_type()==ST_BLOCK_NORM &&
@@ -238,16 +238,16 @@ bool CMKL::train_machine(CFeatures* data)
if (get_solver_type()==ST_ELASTICNET)
{
// -- Initialize subkernel weights for Elasticnet MKL
- CMath::scale_vector(1/CMath::qnorm(beta, num_kernels, 1.0), beta, num_kernels);
+ SGVector<float64_t>::scale_vector(1/SGVector<float64_t>::qnorm(beta, num_kernels, 1.0), beta, num_kernels);

SG_FREE(beta_local);
- beta_local = CMath::clone_vector(beta, num_kernels);
+ beta_local = SGVector<float64_t>::clone_vector(beta, num_kernels);

elasticnet_transform(beta, ent_lambda, num_kernels);
}
else
{
- CMath::scale_vector(1/CMath::qnorm(beta, num_kernels, mkl_norm),
+ SGVector<float64_t>::scale_vector(1/SGVector<float64_t>::qnorm(beta, num_kernels, mkl_norm),
beta, num_kernels); //q-norm = 1
}

@@ -489,7 +489,7 @@ float64_t CMKL::compute_optimal_betas_elasticnet(
}

// --- normalize
- CMath::scale_vector(1.0/Z, beta, num_kernels);
+ SGVector<float64_t>::scale_vector(1.0/Z, beta, num_kernels);

// --- regularize & renormalize

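Both the elastic-net branch and the generic q-norm branch above normalise the kernel weights to unit q-norm before optimising, and the elastic-net path keeps a working copy via clone_vector; all three helpers are now SGVector statics. A sketch with hypothetical weights (q=2 picked for illustration; the buffer returned by clone_vector is assumed to be released with SG_FREE, as elsewhere in these files):

#include <shogun/lib/common.h>
#include <shogun/lib/SGVector.h>

using namespace shogun;

int main()
{
	float64_t beta[3]={0.2, 0.5, 0.3};   // hypothetical subkernel weights
	float64_t mkl_norm=2.0;

	// rescale so that ||beta||_q == 1   (was CMath::scale_vector / CMath::qnorm)
	SGVector<float64_t>::scale_vector(
		1/SGVector<float64_t>::qnorm(beta, 3, mkl_norm), beta, 3);

	// keep a working copy, as the elastic-net branch does (was CMath::clone_vector)
	float64_t* beta_local=SGVector<float64_t>::clone_vector(beta, 3);
	SG_FREE(beta_local);
	return 0;
}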
2 changes: 1 addition & 1 deletion src/shogun/classifier/svm/LibLinear.cpp
@@ -1359,7 +1359,7 @@ void CLibLinear::init_linear_term()
SG_ERROR("Please assign labels first!\n");

m_linear_term=SGVector<float64_t>(m_labels->get_num_labels());
- CMath::fill_vector(m_linear_term.vector, m_linear_term.vlen, -1.0);
+ SGVector<float64_t>::fill_vector(m_linear_term.vector, m_linear_term.vlen, -1.0);
}

#endif //HAVE_LAPACK
42 changes: 21 additions & 21 deletions src/shogun/classifier/svm/NewtonSVM.cpp
@@ -72,7 +72,7 @@ bool CNewtonSVM::train_machine(CFeatures* data)

float64_t* weights = SG_CALLOC(float64_t, x_d+1);
float64_t* out=SG_MALLOC(float64_t, x_n);
- CMath::fill_vector(out, x_n, 1.0);
+ SGVector<float64_t>::fill_vector(out, x_n, 1.0);

int32_t *sv=SG_MALLOC(int32_t, x_n), size_sv=0, iter=0;
float64_t obj, *grad=SG_MALLOC(float64_t, x_d+1);
@@ -113,10 +113,10 @@ bool CNewtonSVM::train_machine(CFeatures* data)
}
int32_t tx=x_d;
int32_t ty=size_sv;
- CMath::transpose_matrix(Xsv, tx, ty);
+ SGMatrix<float64_t>::transpose_matrix(Xsv, tx, ty);

#ifdef DEBUG_NEWTON
- CMath::display_matrix(Xsv, x_d, size_sv);
+ SGMatrix<float64_t>::display_matrix(Xsv, x_d, size_sv);
#endif

float64_t* lcrossdiag=SG_MALLOC(float64_t, (x_d+1)*(x_d+1));
@@ -127,7 +127,7 @@ bool CNewtonSVM::train_machine(CFeatures* data)

vector[x_d]=0;

- CMath::create_diagonal_matrix(lcrossdiag, vector, x_d+1);
+ SGMatrix<float64_t>::create_diagonal_matrix(lcrossdiag, vector, x_d+1);
float64_t* Xsv2=SG_MALLOC(float64_t, x_d*x_d);
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, x_d, x_d, size_sv,
1.0, Xsv, size_sv, Xsv, size_sv, 0.0, Xsv2, x_d);
@@ -155,16 +155,16 @@ bool CNewtonSVM::train_machine(CFeatures* data)
Xsv2sum[x_d*(x_d+1)+x_d]=size_sv;
float64_t* identity_matrix=SG_MALLOC(float64_t, (x_d+1)*(x_d+1));

- CMath::fill_vector(vector, x_d+1, 1.0);
+ SGVector<float64_t>::fill_vector(vector, x_d+1, 1.0);

- CMath::create_diagonal_matrix(identity_matrix, vector, x_d+1);
+ SGMatrix<float64_t>::create_diagonal_matrix(identity_matrix, vector, x_d+1);
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, x_d+1, x_d+1,
x_d+1, 1.0, lcrossdiag, x_d+1, identity_matrix, x_d+1, 1.0,
Xsv2sum, x_d+1);

float64_t* inverse=SG_MALLOC(float64_t, (x_d+1)*(x_d+1));
int32_t r=x_d+1;
- CMath::pinv(Xsv2sum, r, r, inverse);
+ SGMatrix<float64_t>::pinv(Xsv2sum, r, r, inverse);

float64_t* step=SG_MALLOC(float64_t, r);
float64_t* s2=SG_MALLOC(float64_t, r);
@@ -186,7 +186,7 @@ bool CNewtonSVM::train_machine(CFeatures* data)
SG_PRINT("weights[%d]=%.16g\n", i, weights[i]);
#endif

- CMath::vec1_plus_scalar_times_vec2(weights, t, step, r);
+ SGVector<float64_t>::vec1_plus_scalar_times_vec2(weights, t, step, r);
float64_t newton_decrement;
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, 1, 1, r, -0.5,
step, r, grad, r, 0.0, &newton_decrement, 1);
@@ -243,7 +243,7 @@ void CNewtonSVM::line_search_linear(float64_t* weights, float64_t* d, float64_t*
for (int32_t i=0; i<x_n; i++)
Xd[i]=features->dense_dot(i, d, x_d);

- CMath::add_scalar(d[x_d], Xd, x_n);
+ SGVector<float64_t>::add_scalar(d[x_d], Xd, x_n);

#ifdef DEBUG_NEWTON
CMath::display_vector(d, x_d+1, "Weight vector");
@@ -266,10 +266,10 @@ void CNewtonSVM::line_search_linear(float64_t* weights, float64_t* d, float64_t*

do
{
- CMath::vector_multiply(temp1, Y.vector, Xd, x_n);
+ SGVector<float64_t>::vector_multiply(temp1, Y.vector, Xd, x_n);
memcpy(temp1forout, temp1, sizeof(float64_t)*x_n);
- CMath::scale_vector(t, temp1forout, x_n);
- CMath::add(outz, 1.0, out, -1.0, temp1forout, x_n);
+ SGVector<float64_t>::scale_vector(t, temp1forout, x_n);
+ SGVector<float64_t>::add(outz, 1.0, out, -1.0, temp1forout, x_n);

// Calculation of sv
sv_len=0;
@@ -289,8 +289,8 @@ void CNewtonSVM::line_search_linear(float64_t* weights, float64_t* d, float64_t*
}

memset(temp1, 0, sizeof(float64_t)*sv_len);
- CMath::vector_multiply(temp1, outzsv, Ysv, sv_len);
- tempg=CMath::dot(temp1, Xsv, sv_len);
+ SGVector<float64_t>::vector_multiply(temp1, outzsv, Ysv, sv_len);
+ tempg=SGVector<float64_t>::dot(temp1, Xsv, sv_len);
g=wd+(t*dd);
g-=tempg;

@@ -347,18 +347,18 @@ void CNewtonSVM::obj_fun_linear(float64_t* weights, float64_t* out,
float64_t* out1=SG_MALLOC(float64_t, x_n);

//compute steps for obj
- CMath::vector_multiply(out1, out, out, x_n);
- float64_t p1=CMath::sum(out1, x_n)/2;
+ SGVector<float64_t>::vector_multiply(out1, out, out, x_n);
+ float64_t p1=SGVector<float64_t>::sum(out1, x_n)/2;
float64_t C1;
float64_t* w0copy=SG_MALLOC(float64_t, x_d+1);
memcpy(w0copy, w0, sizeof(float64_t)*(x_d+1));
- CMath::scale_vector(0.5, w0copy, x_d+1);
+ SGVector<float64_t>::scale_vector(0.5, w0copy, x_d+1);
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, 1, 1, x_d+1, lambda,
w0, x_d+1, w0copy, x_d+1, 0.0, &C1, 1);
*obj=p1+C1;
- CMath::scale_vector(lambda, w0, x_d);
+ SGVector<float64_t>::scale_vector(lambda, w0, x_d);
float64_t* temp=SG_CALLOC(float64_t, x_n); //temp = out.*Y
- CMath::vector_multiply(temp, out, v.vector, x_n);
+ SGVector<float64_t>::vector_multiply(temp, out, v.vector, x_n);
float64_t* temp1=SG_CALLOC(float64_t, x_d);
SGVector<float64_t> vec;

@@ -376,8 +376,8 @@ void CNewtonSVM::obj_fun_linear(float64_t* weights, float64_t* out,
for (int32_t i=0; i<x_d; i++)
p2[i]=temp1[i];

- p2[x_d]=CMath::sum(temp, x_n);
- CMath::add(grad, 1.0, w0, -1.0, p2, x_d+1);
+ p2[x_d]=SGVector<float64_t>::sum(temp, x_n);
+ SGVector<float64_t>::add(grad, 1.0, w0, -1.0, p2, x_d+1);
int32_t sv_len=0;

for (int32_t i=0; i<x_n; i++)
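NewtonSVM is the heaviest consumer of the moved helpers: diagonal matrices for the regulariser, transpose and pseudo-inverse for the reduced Hessian, and the damped update w <- w + t*step. The update itself is now a single SGVector call; a reduced sketch with hypothetical numbers:

#include <shogun/lib/common.h>
#include <shogun/lib/SGVector.h>
#include <shogun/lib/SGMatrix.h>

using namespace shogun;

int main()
{
	int32_t r=3;                          // plays the role of x_d+1 above
	float64_t diag[3]={1.0, 1.0, 0.0};    // last entry 0: the bias is not regularised
	float64_t lcrossdiag[9];

	// was CMath::create_diagonal_matrix
	SGMatrix<float64_t>::create_diagonal_matrix(lcrossdiag, diag, r);

	float64_t weights[3]={0.0, 0.0, 0.0};
	float64_t step[3]={0.5, -0.25, 0.1};  // hypothetical Newton direction
	float64_t t=1.0;                      // step size from the line search

	// weights += t*step   (was CMath::vec1_plus_scalar_times_vec2)
	SGVector<float64_t>::vec1_plus_scalar_times_vec2(weights, t, step, r);
	return 0;
}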
6 changes: 3 additions & 3 deletions src/shogun/classifier/svm/OnlineSVMSGD.cpp
@@ -140,7 +140,7 @@ bool COnlineSVMSGD::train(CFeatures* data)
float32_t r = 1 - eta * lambda * skip;
if (r < 0.8)
r = pow(1 - eta * lambda, skip);
- CMath::scale_vector(r, w, w_dim);
+ SGVector<float32_t>::scale_vector(r, w, w_dim);
count = skip;
}
t++;
@@ -158,7 +158,7 @@ bool COnlineSVMSGD::train(CFeatures* data)
}

features->end_parser();
- float64_t wnorm = CMath::dot(w,w, w_dim);
+ float64_t wnorm = SGVector<float32_t>::dot(w,w, w_dim);
SG_INFO("Norm: %.6f, Bias: %.6f\n", wnorm, bias);

return true;
@@ -184,7 +184,7 @@ void COnlineSVMSGD::calibrate(int32_t max_vec_num)

//waste cpu cycles for readability
//(only changed dims need checking)
- m=CMath::max(c, c_dim);
+ m=SGVector<float32_t>::max(c, c_dim);
n++;

features->release_example();
6 changes: 3 additions & 3 deletions src/shogun/classifier/svm/QPBSVMLib.cpp
@@ -571,7 +571,7 @@ int32_t CQPBSVMLib::qpbsvm_prloqo(float64_t *x,

float64_t b=0;

- CMath::display_vector(m_f, m_dim, "m_f");
+ SGVector<float64_t>::display_vector(m_f, m_dim, "m_f");
int32_t result=pr_loqo(m_dim, 1, m_f, m_H, a, &b, lb, ub, primal, dual,
2, 5, 1, -0.95, 10,0);

@@ -599,7 +599,7 @@ int32_t CQPBSVMLib::qpbsvm_gauss_seidel(float64_t *x,
{
for (int32_t i=0; i<m_dim; i++)
{
- x[i]= (-m_f[i]-(CMath::dot(x,&m_H[m_dim*i], m_dim) -
+ x[i]= (-m_f[i]-(SGVector<float64_t>::dot(x,&m_H[m_dim*i], m_dim) -
m_H[m_dim*i+i]*x[i]))/m_H[m_dim*i+i];
x[i]=CMath::clamp(x[i], 0.0, 1.0);
}
@@ -630,7 +630,7 @@ int32_t CQPBSVMLib::qpbsvm_gradient_descent(float64_t *x,
{
for (int32_t i=0; i<m_dim; i++)
{
- x[i]-=0.001*(CMath::dot(x,&m_H[m_dim*i], m_dim)+m_f[i]);
+ x[i]-=0.001*(SGVector<float64_t>::dot(x,&m_H[m_dim*i], m_dim)+m_f[i]);
x[i]=CMath::clamp(x[i], 0.0, 1.0);
}
}
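The Gauss-Seidel and gradient-descent loops above only need a dense dot product against a column of the Hessian; CMath::clamp stays where it is, since only the array-valued routines move. A one-sweep sketch on a hypothetical 2-variable box-constrained QP, min 0.5 x'Hx + f'x over [0,1]^2 (CMath is assumed to come from shogun/mathematics/Math.h):

#include <shogun/lib/common.h>
#include <shogun/lib/SGVector.h>
#include <shogun/mathematics/Math.h>

using namespace shogun;

int main()
{
	int32_t dim=2;
	float64_t H[4]={2.0, 0.5, 0.5, 2.0};   // column-major, symmetric
	float64_t f[2]={-1.0, -1.0};
	float64_t x[2]={0.0, 0.0};

	// one Gauss-Seidel sweep, mirroring qpbsvm_gauss_seidel above
	for (int32_t i=0; i<dim; i++)
	{
		x[i]=(-f[i]-(SGVector<float64_t>::dot(x, &H[dim*i], dim)   // was CMath::dot
				-H[dim*i+i]*x[i]))/H[dim*i+i];
		x[i]=CMath::clamp(x[i], 0.0, 1.0);                         // unchanged helper
	}
	return 0;
}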
14 changes: 7 additions & 7 deletions src/shogun/classifier/svm/SGDQN.cpp
@@ -129,7 +129,7 @@ bool CSGDQN::train(CFeatures* data)


float64_t* Bc=SG_MALLOC(float64_t, w.vlen);
- CMath::fill_vector(Bc, w.vlen, 1/lambda);
+ SGVector<float64_t>::fill_vector(Bc, w.vlen, 1/lambda);

float64_t* result=SG_MALLOC(float64_t, w.vlen);
float64_t* B=SG_MALLOC(float64_t, w.vlen);
@@ -162,8 +162,8 @@ bool CSGDQN::train(CFeatures* data)
{
SGVector<float64_t> w_1=w.clone();
float64_t loss_1=-loss->first_derivative(z,1);
- CMath::vector_multiply(result,Bc,v.vector,w.vlen);
- CMath::add(w.vector,eta*loss_1*y,result,1.0,w.vector,w.vlen);
+ SGVector<float64_t>::vector_multiply(result,Bc,v.vector,w.vlen);
+ SGVector<float64_t>::add(w.vector,eta*loss_1*y,result,1.0,w.vector,w.vlen);
float64_t z2 = y * features->dense_dot(i, w.vector, w.vlen);
float64_t diffloss = -loss->first_derivative(z2,1) - loss_1;
if(diffloss)
@@ -181,16 +181,16 @@ bool CSGDQN::train(CFeatures* data)
{
if(--count<=0)
{
- CMath::vector_multiply(result,Bc,w.vector,w.vlen);
- CMath::add(w.vector,-skip*lambda*eta,result,1.0,w.vector,w.vlen);
+ SGVector<float64_t>::vector_multiply(result,Bc,w.vector,w.vlen);
+ SGVector<float64_t>::add(w.vector,-skip*lambda*eta,result,1.0,w.vector,w.vlen);
count = skip;
updateB=true;
}

if (z < 1 || is_log_loss)
{
- CMath::vector_multiply(result,Bc,v.vector,w.vlen);
- CMath::add(w.vector,eta*-loss->first_derivative(z,1)*y,result,1.0,w.vector,w.vlen);
+ SGVector<float64_t>::vector_multiply(result,Bc,v.vector,w.vlen);
+ SGVector<float64_t>::add(w.vector,eta*-loss->first_derivative(z,1)*y,result,1.0,w.vector,w.vlen);
}
}
t++;
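SGDQN scales the (sub)gradient by a diagonal preconditioner Bc before adding it to w, i.e. w <- w + eta*loss'(z)*y * (Bc .* x); the element-wise product and the axpy-style add are the moved helpers here. Reduced sketch with hypothetical numbers (x stands in for the current example's dense feature vector):

#include <shogun/lib/common.h>
#include <shogun/lib/SGVector.h>

using namespace shogun;

int main()
{
	int32_t n=3;
	float64_t lambda=1e-4;

	float64_t w[3]={0.0, 0.0, 0.0};
	float64_t x[3]={1.0, -2.0, 0.5};        // current example
	float64_t y=1.0;                        // its label
	float64_t eta=0.1, loss_1=1.0;          // step size and -loss'(z,1), hypothetical

	float64_t Bc[3];
	SGVector<float64_t>::fill_vector(Bc, n, 1/lambda);      // was CMath::fill_vector

	float64_t result[3];
	// result = Bc .* x                     (was CMath::vector_multiply)
	SGVector<float64_t>::vector_multiply(result, Bc, x, n);
	// w = (eta*loss_1*y)*result + 1.0*w    (was CMath::add)
	SGVector<float64_t>::add(w, eta*loss_1*y, result, 1.0, w, n);
	return 0;
}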
4 changes: 2 additions & 2 deletions src/shogun/classifier/svm/SVMLight.cpp
@@ -300,7 +300,7 @@ void CSVMLight::svm_learn()
SGVector<int32_t> lab=((CBinaryLabels*) m_labels)->get_int_labels();
int32_t totdoc=lab.vlen;
ASSERT(lab.vector && lab.vlen);
- int32_t* label=CMath::clone_vector(lab.vector, lab.vlen);
+ int32_t* label=SGVector<int32_t>::clone_vector(lab.vector, lab.vlen);

int32_t* docs=SG_MALLOC(int32_t, totdoc);
SG_FREE(W);
@@ -349,7 +349,7 @@ void CSVMLight::svm_learn()
else
{
learn_parm->eps=SG_MALLOC(float64_t, totdoc); /* equivalent regression epsilon for classification */
- CMath::fill_vector(learn_parm->eps, totdoc, -1.0);
+ SGVector<float64_t>::fill_vector(learn_parm->eps, totdoc, -1.0);
}

learn_parm->svm_cost = SG_MALLOC(float64_t, totdoc);
4 changes: 2 additions & 2 deletions src/shogun/classifier/svm/SVMOcas.cpp
@@ -320,8 +320,8 @@ void CSVMOcas::compute_W(
bias += c_bias[i]*alpha[i];
}

- *sq_norm_W = CMath::dot(W,W, nDim) + CMath::sq(bias);
- *dp_WoldW = CMath::dot(W,oldW, nDim) + bias*old_bias;
+ *sq_norm_W = SGVector<float64_t>::dot(W,W, nDim) + CMath::sq(bias);
+ *dp_WoldW = SGVector<float64_t>::dot(W,oldW, nDim) + bias*old_bias;
//SG_PRINT("nSel=%d sq_norm_W=%f dp_WoldW=%f\n", nSel, *sq_norm_W, *dp_WoldW);

o->bias = bias;
6 changes: 3 additions & 3 deletions src/shogun/classifier/svm/SVMSGD.cpp
@@ -143,14 +143,14 @@ bool CSVMSGD::train_machine(CFeatures* data)
float64_t r = 1 - eta * lambda * skip;
if (r < 0.8)
r = pow(1 - eta * lambda, skip);
- CMath::scale_vector(r, w.vector, w.vlen);
+ SGVector<float64_t>::scale_vector(r, w.vector, w.vlen);
count = skip;
}
t++;
}
}

- float64_t wnorm = CMath::dot(w.vector,w.vector, w.vlen);
+ float64_t wnorm = SGVector<float64_t>::dot(w.vector,w.vector, w.vlen);
SG_INFO("Norm: %.6f, Bias: %.6f\n", wnorm, bias);

return true;
@@ -182,7 +182,7 @@ void CSVMSGD::calibrate()

//waste cpu cycles for readability
//(only changed dims need checking)
- m=CMath::max(c, c_dim);
+ m=SGVector<float64_t>::max(c, c_dim);
}

// bias update scaling
