remove most of the destroy/free_vector calls
that should fix compilation
Soeren Sonnenburg committed May 3, 2012
1 parent 9d458c8 commit e2748d0
Showing 24 changed files with 17 additions and 93 deletions.
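The pattern is the same across all 24 files: SGVector (and the related SG* containers) is now reference counted, so the explicit free_vector()/destroy_vector() calls that used to follow every temporary vector are simply dropped. A minimal before/after sketch of the idiom, assuming the shogun headers at this revision; the helper function below is illustrative and not part of the commit:

```cpp
#include <shogun/lib/SGVector.h>
#include <shogun/features/DotFeatures.h>

using namespace shogun;

// Hypothetical helper: sums one computed feature vector of a CDotFeatures object.
float64_t sum_feature_vector(CDotFeatures* feats, int32_t idx)
{
	SGVector<float64_t> fea = feats->get_computed_dot_feature_vector(idx);

	float64_t sum = 0.0;
	for (int32_t j = 0; j < fea.vlen; j++)
		sum += fea.vector[j];

	// Before this commit the caller had to release the buffer explicitly:
	//     fea.free_vector();
	// Afterwards the reference-counted SGVector releases its memory when it
	// goes out of scope, so no manual cleanup is needed.
	return sum;
}
```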
1 change: 0 additions & 1 deletion src/shogun/classifier/AveragedPerceptron.cpp
@@ -94,7 +94,6 @@ bool CAveragedPerceptron::train(CFeatures* data)
bias=tmp_bias/(num_vec*iter);

SG_FREE(output);
train_labels.free_vector();
SG_FREE(tmp_w);

return converged;
15 changes: 2 additions & 13 deletions src/shogun/classifier/GaussianNaiveBayes.cpp
@@ -42,9 +42,7 @@ CGaussianNaiveBayes::~CGaussianNaiveBayes()
SG_UNREF(m_features);

m_means.destroy_matrix();
m_rates.destroy_vector();
m_variances.destroy_matrix();
m_label_prob.destroy_vector();
};

CFeatures* CGaussianNaiveBayes::get_features()
@@ -107,18 +105,14 @@ bool CGaussianNaiveBayes::train(CFeatures* data)
m_variances.num_rows = m_dim;
m_variances.num_cols = m_num_classes;

m_label_prob.vector = SG_MALLOC(float64_t, m_num_classes);
m_label_prob.vlen = m_num_classes;
m_label_prob=SGVector<float64_t>(m_num_classes);

// allocate memory for label rates
m_rates.vector = SG_MALLOC(float64_t, m_num_classes);
m_rates.vlen = m_num_classes;
m_rates=SGVector<float64_t>(m_num_classes);

// assure that memory is allocated
ASSERT(m_means.matrix);
ASSERT(m_variances.matrix);
ASSERT(m_rates.vector);
ASSERT(m_label_prob.vector);

// make arrays filled by zeros before using
m_means.zero();
@@ -139,7 +133,6 @@ bool CGaussianNaiveBayes::train(CFeatures* data)
SGVector<float64_t> fea = m_features->get_computed_dot_feature_vector(i);
for (j=0; j<m_dim; j++)
m_means(j, train_labels.vector[i]) += fea.vector[j];
fea.free_vector();

m_label_prob.vector[train_labels.vector[i]]+=1.0;

@@ -166,7 +159,6 @@ bool CGaussianNaiveBayes::train(CFeatures* data)
m_variances(j, train_labels.vector[i]) +=
CMath::sq(fea[j]-m_means(j, train_labels.vector[i]));
}
fea.free_vector();

progress++;
SG_PROGRESS(progress, 0, max_progress);
@@ -186,8 +178,6 @@ bool CGaussianNaiveBayes::train(CFeatures* data)
}
SG_DONE();

train_labels.free_vector();

return true;
}

@@ -256,7 +246,6 @@ float64_t CGaussianNaiveBayes::apply(int32_t idx)
if (m_rates.vector[i]>m_rates.vector[max_label_idx])
max_label_idx = i;
}
feature_vector.free_vector();

return max_label_idx+m_min_label;
};
1 change: 0 additions & 1 deletion src/shogun/classifier/LDA.cpp
@@ -188,7 +188,6 @@ bool CLDA::train_machine(CFeatures* data)
CMath::display_vector(mean_neg, num_feat, "mean_neg");
#endif

train_labels.free_vector();
SG_FREE(mean_neg);
SG_FREE(mean_pos);
SG_FREE(scatter);
1 change: 0 additions & 1 deletion src/shogun/classifier/Perceptron.cpp
@@ -83,7 +83,6 @@ bool CPerceptron::train_machine(CFeatures* data)
SG_WARNING("Perceptron algorithm did not converge after %d iterations.\n", max_iter);

SG_FREE(output);
train_labels.free_vector();

return converged;
}
7 changes: 1 addition & 6 deletions src/shogun/classifier/QDA.cpp
@@ -62,7 +62,6 @@ void CQDA::cleanup()
m_covs.free_ndarray();
m_M.free_ndarray();
m_means.free_matrix();
m_slog.free_vector();

m_num_classes = 0;
}
@@ -81,7 +80,6 @@ CLabels* CQDA::apply()
SGMatrix< float64_t > X(num_vecs, m_dim);
SGMatrix< float64_t > A(num_vecs, m_dim);
SGVector< float64_t > norm2(num_vecs*m_num_classes);

norm2.zero();

int i, j, k, vlen;
@@ -132,7 +130,6 @@ CLabels* CQDA::apply()
CMath::display_vector(out->get_labels().vector, num_vecs, "Labels");
#endif

norm2.destroy_vector();
A.destroy_matrix();
X.destroy_matrix();

@@ -300,7 +297,7 @@ bool CQDA::train_machine(CFeatures* data)
M_dims[2] = m_num_classes;
m_M = SGNDArray< float64_t >(M_dims, 3, true);

m_slog = SGVector< float32_t >(m_num_classes, true);
m_slog = SGVector< float32_t >(m_num_classes);
m_slog.zero();

index_t idx = 0;
@@ -345,8 +342,6 @@ bool CQDA::train_machine(CFeatures* data)

rotations.destroy_ndarray();
scalings.destroy_matrix();
sinvsqrt.destroy_vector();
train_labels.destroy_vector();
SG_FREE(class_idxs);
SG_FREE(class_nums);
return true;
5 changes: 1 addition & 4 deletions src/shogun/classifier/mkl/MKL.cpp
@@ -1473,7 +1473,7 @@ void CMKL::compute_sum_beta(float64_t* sumw)

int32_t nsv=svm->get_num_support_vectors();
int32_t num_kernels = kernel->get_num_subkernels();
SGVector<float64_t> beta=SGVector<float64_t>(num_kernels, true);
SGVector<float64_t> beta=SGVector<float64_t>(num_kernels);
int32_t nweights=0;
const float64_t* old_beta = kernel->get_subkernel_weights(nweights);
ASSERT(nweights==num_kernels);
@@ -1507,9 +1507,6 @@ void CMKL::compute_sum_beta(float64_t* sumw)

mkl_iterations++;
kernel->set_subkernel_weights(SGVector<float64_t>( (float64_t*) old_beta, num_kernels));

/* safe because of above comment, otherwise: memleak */
beta.free_vector();
}


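The constructor calls that lose their trailing `true` argument in QDA.cpp and MKL.cpp above (e.g. SGVector<float64_t>(num_kernels, true) becoming SGVector<float64_t>(num_kernels)) follow from the same change: the boolean flagged whether the vector should later free its buffer, and with reference counting that bookkeeping is automatic. A small sketch under that assumption, using only the SGVector header:

```cpp
#include <shogun/lib/SGVector.h>

using namespace shogun;

int main()
{
	// Old style: SGVector<float64_t> beta(10, true);  // explicit ownership flag
	// New style: ownership is tracked by the reference count, so the flag is dropped.
	SGVector<float64_t> beta(10);
	beta.zero();

	// No beta.free_vector() here; the buffer is released when beta is destroyed.
	return 0;
}
```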
2 changes: 0 additions & 2 deletions src/shogun/classifier/mkl/MKLMulticlass.cpp
@@ -241,11 +241,9 @@ void CMKLMulticlass::addingweightsstep( const std::vector<float64_t> &

float64_t CMKLMulticlass::getsumofsignfreealphas()
{

std::vector<int> trainlabels2(m_labels->get_num_labels());
SGVector<int32_t> lab=m_labels->get_int_labels();
std::copy(lab.vector,lab.vector+lab.vlen, trainlabels2.begin());
lab.free_vector();

ASSERT (trainlabels2.size()>0);
float64_t sum=0;
1 change: 0 additions & 1 deletion src/shogun/classifier/svm/NewtonSVM.cpp
@@ -108,7 +108,6 @@ bool CNewtonSVM::train_machine(CFeatures* data)
sgv=features->get_computed_dot_feature_vector(sv[k]);
for (int32_t j=0; j<x_d; j++)
Xsv[k*x_d+j]=sgv.vector[j];
sgv.destroy_vector();
}
int32_t tx=x_d;
int32_t ty=size_sv;
2 changes: 0 additions & 2 deletions src/shogun/classifier/svm/SGDQN.cpp
@@ -196,8 +196,6 @@ bool CSGDQN::train(CFeatures* data)
}
}
t++;

v.free_vector();
}
}
SG_FREE(result);
16 changes: 5 additions & 11 deletions src/shogun/classifier/svm/SVM.cpp
@@ -41,7 +41,6 @@ CSVM::CSVM(float64_t C, CKernel* k, CLabels* lab)

CSVM::~CSVM()
{
m_linear_term.destroy_vector();
SG_UNREF(mkl);
}

@@ -303,15 +302,15 @@ float64_t* CSVM::get_linear_term_array()
{
if (m_linear_term.vlen==0)
return NULL;
float64_t* a = SG_MALLOC(float64_t, m_linear_term.vlen);

SGVector<float64_t> a(m_linear_term.vlen);
memcpy(a.vector, m_linear_term.vector,
memcpy(a, m_linear_term.vector,
m_linear_term.vlen*sizeof(float64_t));

return a.vector;
return a;
}

void CSVM::set_linear_term(const SGVector<float64_t>& linear_term)
void CSVM::set_linear_term(const SGVector<float64_t> linear_term)
{
ASSERT(linear_term.vector);

@@ -326,12 +325,7 @@ void CSVM::set_linear_term(const SGVector<float64_t>& linear_term)
"of entries (%d) in linear term \n", num_labels, linear_term.vlen);
}

m_linear_term.destroy_vector();

m_linear_term.vlen=linear_term.vlen;
m_linear_term=SGVector<float64_t> (linear_term.vlen);
memcpy(m_linear_term.vector, linear_term.vector,
linear_term.vlen*sizeof(float64_t));
m_linear_term=linear_term;
}

SGVector<float64_t> CSVM::get_linear_term()
2 changes: 1 addition & 1 deletion src/shogun/classifier/svm/SVM.h
@@ -81,7 +81,7 @@ class CSVM : public CKernelMachine
*
* @param linear_term the linear term
*/
virtual void set_linear_term(const SGVector<float64_t>& linear_term);
virtual void set_linear_term(const SGVector<float64_t> linear_term);


/** load a SVM from file
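The signature change in SVM.h just above (mirrored by GMM.h further down) follows from the same refactor: once SGVector is reference counted, copying one is a cheap shallow copy, so set_linear_term can take its argument by value and store it with plain assignment instead of the old allocate-and-memcpy sequence. A simplified sketch of the new setter shape; the surrounding struct is illustrative, not shogun code:

```cpp
#include <shogun/lib/SGVector.h>

using namespace shogun;

// Illustrative holder mirroring the shape of CSVM::set_linear_term after this commit.
struct LinearTermHolder
{
	SGVector<float64_t> m_linear_term;

	// Passing by value is cheap: the copy shares the buffer and bumps the refcount.
	void set_linear_term(const SGVector<float64_t> linear_term)
	{
		// Plain assignment replaces the old SG_MALLOC + memcpy + destroy_vector
		// sequence; the previously held buffer is released once its refcount
		// drops to zero.
		m_linear_term = linear_term;
	}
};
```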
1 change: 0 additions & 1 deletion src/shogun/classifier/svm/SVMLin.cpp
@@ -105,6 +105,5 @@ bool CSVMLin::train_machine(CFeatures* data)

SG_FREE(Data.C);
SG_FREE(Outputs.vec);
train_labels.free_vector();
return true;
}
3 changes: 0 additions & 3 deletions src/shogun/classifier/svm/SVMOcas.cpp
@@ -135,8 +135,6 @@ bool CSVMOcas::train_machine(CFeatures* data)
SG_FREE(cp_bias);
cp_bias=NULL;

lab.free_vector();

SG_FREE(old_w);
old_w=NULL;

@@ -340,7 +338,6 @@ void CSVMOcas::init()
w=NULL;
old_w=NULL;
tmp_a_buf=NULL;
lab.destroy_vector();
cp_value=NULL;
cp_index=NULL;
cp_nz_dims=NULL;
2 changes: 0 additions & 2 deletions src/shogun/classifier/svm/WDSVMOcas.cpp
@@ -244,8 +244,6 @@ bool CWDSVMOcas::train_machine(CFeatures* data)
SG_FREE(cuts);

lab=NULL;
labvec.free_vector();

SG_UNREF(alphabet);

return true;
15 changes: 3 additions & 12 deletions src/shogun/clustering/GMM.cpp
@@ -106,8 +106,8 @@ void CGMM::cleanup()
for (int32_t i = 0; i < m_components.vlen; i++)
SG_UNREF(m_components.vector[i]);

m_components.destroy_vector();
m_coefficients.destroy_vector();
m_components.unref();
m_coefficients.unref();
}

bool CGMM::train(CFeatures* data)
@@ -178,7 +178,6 @@ float64_t CGMM::train_em(float64_t min_cov, int32_t max_iter, float64_t min_chan

logPx[i]=CMath::log(logPx[i]);
log_likelihood_cur+=logPx[i];
v.free_vector();

for (int32_t j=0; j<m_components.vlen; j++)
{
@@ -244,7 +243,6 @@ float64_t CGMM::train_smem(int32_t max_iter, int32_t max_cand, float64_t min_cov
}

logPx[i]=CMath::log(logPx[i]);
v.free_vector();

for (int32_t j=0; j<m_components.vlen; j++)
{
@@ -372,7 +370,6 @@ void CGMM::partial_em(int32_t comp1, int32_t comp2, int32_t comp3, float64_t min
post_add[i]=CMath::log(CMath::exp(init_logPxy[i*m_components.vlen+comp1]-init_logPx[i])+
CMath::exp(init_logPxy[i*m_components.vlen+comp2]-init_logPx[i])+
CMath::exp(init_logPxy[i*m_components.vlen+comp3]-init_logPx[i]));
v.free_vector();
}

SGVector<CGaussian*> components(3);
@@ -495,7 +492,6 @@ void CGMM::partial_em(int32_t comp1, int32_t comp2, int32_t comp3, float64_t min

logPx[i]=CMath::log(logPx[i]+init_logPx_fix[i]);
log_likelihood_cur+=logPx[i];
v.free_vector();

for (int32_t j=0; j<3; j++)
{
@@ -549,7 +545,6 @@ void CGMM::max_likelihood(SGMatrix<float64_t> alpha, float64_t min_cov)
alpha_sum+=alpha.matrix[j*alpha.num_cols+i];
SGVector<float64_t> v=dotdata->get_computed_dot_feature_vector(j);
CMath::add<float64_t>(mean_sum, alpha.matrix[j*alpha.num_cols+i], v.vector, 1, mean_sum, v.vlen);
v.free_vector();
}

for (int32_t j=0; j<num_dim; j++)
@@ -601,8 +596,6 @@ void CGMM::max_likelihood(SGMatrix<float64_t> alpha, float64_t min_cov)
cov_sum[0]+=temp*alpha.matrix[j*alpha.num_cols+i];
break;
}

v.free_vector();
}

switch (cov_type)
Expand Down Expand Up @@ -705,9 +698,8 @@ SGVector<float64_t> CGMM::get_coef()
return m_coefficients;
}

void CGMM::set_coef(const SGVector<float64_t>& coefficients)
void CGMM::set_coef(const SGVector<float64_t> coefficients)
{
m_coefficients.destroy_vector();
m_coefficients=coefficients;
}

@@ -723,7 +715,6 @@ void CGMM::set_comp(const SGVector<CGaussian*>& components)
SG_UNREF(m_components.vector[i]);
}

m_components.destroy_vector();
m_components=components;

for (int32_t i=0; i<m_components.vlen; i++)
2 changes: 1 addition & 1 deletion src/shogun/clustering/GMM.h
@@ -177,7 +177,7 @@ class CGMM : public CDistribution
*
* @param coefficients mixing coefficients
*/
virtual void set_coef(const SGVector<float64_t>& coefficients);
virtual void set_coef(const SGVector<float64_t> coefficients);

/** get components
*
3 changes: 0 additions & 3 deletions src/shogun/clustering/KMeans.cpp
@@ -42,7 +42,6 @@ CKMeans::CKMeans(int32_t k_, CDistance* d)

CKMeans::~CKMeans()
{
R.destroy_vector();
}

bool CKMeans::train_machine(CFeatures* data)
@@ -65,7 +64,6 @@ bool CKMeans::train_machine(CFeatures* data)
Weights.vector[i]=1.0;

clustknb(false, NULL);
Weights.destroy_vector();

return true;
}
@@ -188,7 +186,6 @@ void CKMeans::clustknb(bool use_old_mus, float64_t *mus_start)
const int32_t XDimk=dimensions*k;
int32_t iter=0;

R.destroy_vector();
R=SGVector<float64_t>(k);

mus=SGMatrix<float64_t>(dimensions, k);
3 changes: 0 additions & 3 deletions src/shogun/distance/MahalanobisDistance.cpp
@@ -91,9 +91,6 @@ float64_t CMahalanobisDistance::compute(int32_t idx_a, int32_t idx_b)

float64_t result = cblas_ddot(v.vlen, v.vector, 1, diff.vector, 1);

v.destroy_vector();
diff.destroy_vector();

if (!use_mean)
((CSimpleFeatures<float64_t>*) lhs)->free_feature_vector(avec, idx_a);

