Skip to content

Commit

Permalink
remove destroy / free vector functions
Browse files Browse the repository at this point in the history
  • Loading branch information
Soeren Sonnenburg committed May 3, 2012
1 parent 52b1bf0 commit 9d458c8
Show file tree
Hide file tree
Showing 29 changed files with 56 additions and 132 deletions.
1 change: 0 additions & 1 deletion src/shogun/classifier/svm/GPBTSVM.cpp
Expand Up @@ -125,7 +125,6 @@ bool CGPBTSVM::train_machine(CFeatures* data)
}

delete prob.KER;
lab.free_vector();
SG_FREE(solution);

return true;
Expand Down
20 changes: 17 additions & 3 deletions src/shogun/classifier/svm/LibLinear.cpp
Expand Up @@ -72,7 +72,6 @@ void CLibLinear::init()

CLibLinear::~CLibLinear()
{
m_linear_term.destroy_vector();
}

bool CLibLinear::train_machine(CFeatures* data)
Expand Down Expand Up @@ -1133,6 +1132,23 @@ void CLibLinear::solve_l1r_lr(
delete [] xjpos_sum;
}

void CLibLinear::set_linear_term(const SGVector<float64_t> linear_term)
{
if (!m_labels)
SG_ERROR("Please assign labels first!\n");

int32_t num_labels=m_labels->get_num_labels();

if (num_labels!=linear_term.vlen)
{
SG_ERROR("Number of labels (%d) does not match number"
" of entries (%d) in linear term \n", num_labels,
linear_term.vlen);
}

m_linear_term=linear_term;
}

SGVector<float64_t> CLibLinear::get_linear_term()
{
if (!m_linear_term.vlen || !m_linear_term.vector)
Expand All @@ -1146,8 +1162,6 @@ void CLibLinear::init_linear_term()
if (!m_labels)
SG_ERROR("Please assign labels first!\n");

m_linear_term.destroy_vector();

m_linear_term=SGVector<float64_t>(m_labels->get_num_labels());
CMath::fill_vector(m_linear_term.vector, m_linear_term.vlen, -1.0);
}
Expand Down
20 changes: 1 addition & 19 deletions src/shogun/classifier/svm/LibLinear.h
Expand Up @@ -142,25 +142,7 @@ class CLibLinear : public CLinearMachine
}

/** set the linear term for qp */
inline void set_linear_term(const SGVector<float64_t>& linear_term)
{
if (!m_labels)
SG_ERROR("Please assign labels first!\n");

int32_t num_labels=m_labels->get_num_labels();

if (num_labels!=linear_term.vlen)
{
SG_ERROR("Number of labels (%d) does not match number"
" of entries (%d) in linear term \n", num_labels,
linear_term.vlen);
}

m_linear_term.destroy_vector();
m_linear_term.vector=CMath::clone_vector(linear_term.vector,
linear_term.vlen);
m_linear_term.vlen=linear_term.vlen;
}
void set_linear_term(const SGVector<float64_t> linear_term);

/** get the linear term for qp */
SGVector<float64_t> get_linear_term();
Expand Down
1 change: 0 additions & 1 deletion src/shogun/classifier/svm/SVMLight.cpp
Expand Up @@ -301,7 +301,6 @@ void CSVMLight::svm_learn()
int32_t totdoc=lab.vlen;
ASSERT(lab.vector && lab.vlen);
int32_t* label=CMath::clone_vector(lab.vector, lab.vlen);
lab.free_vector();

int32_t* docs=SG_MALLOC(int32_t, totdoc);
SG_FREE(W);
Expand Down
10 changes: 1 addition & 9 deletions src/shogun/converter/MultidimensionalScaling.cpp
Expand Up @@ -78,7 +78,6 @@ void CMultidimensionalScaling::init()

CMultidimensionalScaling::~CMultidimensionalScaling()
{
m_eigenvalues.destroy_vector();
}

SGVector<float64_t> CMultidimensionalScaling::get_eigenvalues() const
Expand Down Expand Up @@ -222,8 +221,7 @@ SGMatrix<float64_t> CMultidimensionalScaling::classic_embedding(SGMatrix<float64
}

// set eigenvalues vector
m_eigenvalues.destroy_vector();
m_eigenvalues = SGVector<float64_t>(eigenvalues_vector,m_target_dim,true);
m_eigenvalues = SGVector<float64_t>(eigenvalues_vector,m_target_dim);
#else /* not HAVE_ARPACK */
// using LAPACK
float64_t* eigenvalues_vector = SG_MALLOC(float64_t, N);
Expand All @@ -234,16 +232,12 @@ SGMatrix<float64_t> CMultidimensionalScaling::classic_embedding(SGMatrix<float64
ASSERT(eigenproblem_status==0);

// set eigenvalues vector
m_eigenvalues.destroy_vector();
m_eigenvalues = SGVector<float64_t>(m_target_dim);
m_eigenvalues.do_free = false;

// fill eigenvalues vector in backwards order
for (i=0; i<m_target_dim; i++)
m_eigenvalues.vector[i] = eigenvalues_vector[m_target_dim-i-1];

SG_FREE(eigenvalues_vector);

// construct embedding
for (i=0; i<m_target_dim; i++)
{
Expand Down Expand Up @@ -399,10 +393,8 @@ SGMatrix<float64_t> CMultidimensionalScaling::landmark_embedding(SGMatrix<float6
// cleanup
lmk_feature_matrix.destroy_matrix();
SG_FREE(current_dist_to_lmks);
lmk_idxs.destroy_vector();
SG_FREE(mean_sq_dist_vector);
SG_FREE(to_process);
lmk_idxs.destroy_vector();

return SGMatrix<float64_t>(new_feature_matrix,m_target_dim,total_N);
}
Expand Down
4 changes: 0 additions & 4 deletions src/shogun/converter/StochasticProximityEmbedding.cpp
Expand Up @@ -352,15 +352,11 @@ CSimpleFeatures< float64_t >* CStochasticProximityEmbedding::embed_distance(CDis
}

// Free memory
scale.destroy_vector();
D.destroy_vector();
Yd.destroy_matrix();
Rt.destroy_vector();
if ( m_strategy == SPE_LOCAL )
{
ind1Neighbors.destroy_matrix();
neighbors_mat.destroy_matrix();
J2.destroy_vector();
delete[] ind2;
}

Expand Down
35 changes: 29 additions & 6 deletions src/shogun/distributions/Gaussian.cpp
Expand Up @@ -59,9 +59,7 @@ void CGaussian::init()

CGaussian::~CGaussian()
{
m_d.destroy_vector();
m_u.destroy_matrix();
m_mean.free_vector();
}

bool CGaussian::train(CFeatures* data)
Expand All @@ -75,8 +73,6 @@ bool CGaussian::train(CFeatures* data)
}
CDotFeatures* dotdata=(CDotFeatures *) data;

m_mean.destroy_vector();

m_mean=dotdata->get_mean();
SGMatrix<float64_t> cov=dotdata->get_cov();

Expand Down Expand Up @@ -119,7 +115,6 @@ float64_t CGaussian::get_log_likelihood_example(int32_t num_example)
ASSERT(features->has_property(FP_DOT));
SGVector<float64_t> v=((CDotFeatures *)features)->get_computed_dot_feature_vector(num_example);
float64_t answer=compute_log_PDF(v);
v.free_vector();
return answer;
}

Expand Down Expand Up @@ -162,6 +157,35 @@ float64_t CGaussian::compute_log_PDF(const SGVector<float64_t>& point)
return -0.5*answer;
}

/** Get the mean vector of the Gaussian.
 *
 * @return the stored mean (returned by value; with SGVector's
 * ref-counting this shares the underlying buffer rather than
 * deep-copying it)
 */
SGVector<float64_t> CGaussian::get_mean()
{
return m_mean;
}

/** Set the mean vector of the Gaussian.
 *
 * A one-element mean switches the covariance parametrization to
 * SPHERICAL. The vector is stored via SGVector assignment (shallow,
 * ref-counted copy).
 *
 * @param mean new mean vector
 */
void CGaussian::set_mean(const SGVector<float64_t> mean)
{
	m_mean=mean;

	/* a single entry implies a spherical covariance */
	if (mean.vlen==1)
		m_cov_type=SPHERICAL;
}

/** Set the covariance matrix of the Gaussian.
 *
 * The matrix must be square and match the current mean's dimension.
 * It is split into the internal factorized representation via
 * decompose_cov() and the distribution is re-initialized.
 *
 * @param cov new covariance matrix; when cov owns its buffer
 * (do_free set) that buffer is released here after decomposition
 */
void CGaussian::set_cov(SGMatrix<float64_t> cov)
{
ASSERT(cov.num_rows==cov.num_cols);
ASSERT(cov.num_rows==m_mean.vlen);
decompose_cov(cov);
init();
// release caller-owned storage once the decomposition has copied what it needs
if (cov.do_free)
cov.free_matrix();
}

/** Set the diagonal of the factorized covariance representation.
 *
 * The vector is stored via SGVector assignment (shallow, ref-counted
 * copy) and the distribution is re-initialized to reflect it.
 *
 * @param d new diagonal vector
 */
void CGaussian::set_d(const SGVector<float64_t> d)
{
m_d = d;
init();
}

SGMatrix<float64_t> CGaussian::get_cov()
{
float64_t* cov=SG_MALLOC(float64_t, m_mean.vlen*m_mean.vlen);
Expand Down Expand Up @@ -212,7 +236,6 @@ void CGaussian::register_params()

void CGaussian::decompose_cov(SGMatrix<float64_t> cov)
{
m_d.destroy_vector();
switch (m_cov_type)
{
case FULL:
Expand Down
31 changes: 4 additions & 27 deletions src/shogun/distributions/Gaussian.h
Expand Up @@ -120,23 +120,13 @@ class CGaussian : public CDistribution
*
* @return mean
*/
virtual inline SGVector<float64_t> get_mean()
{
return m_mean;
}
virtual SGVector<float64_t> get_mean();

/** set mean
*
* @param mean new mean
*/
virtual inline void set_mean(const SGVector<float64_t>& mean)
{
m_mean.destroy_vector();
if (mean.vlen==1)
m_cov_type=SPHERICAL;

m_mean=mean;
}
virtual void set_mean(const SGVector<float64_t> mean);

/** get covariance
*
Expand All @@ -150,15 +140,7 @@ class CGaussian : public CDistribution
*
* @param cov new covariance
*/
virtual inline void set_cov(SGMatrix<float64_t> cov)
{
ASSERT(cov.num_rows==cov.num_cols);
ASSERT(cov.num_rows==m_mean.vlen);
decompose_cov(cov);
init();
if (cov.do_free)
cov.free_matrix();
}
virtual void set_cov(SGMatrix<float64_t> cov);

/** get covariance type
*
Expand Down Expand Up @@ -193,12 +175,7 @@ class CGaussian : public CDistribution
*
* @param d new diagonal
*/
inline void set_d(const SGVector<float64_t>& d)
{
m_d.destroy_vector();
m_d = d;
init();
}
void set_d(const SGVector<float64_t> d);

/** get unitary matrix
*
Expand Down
2 changes: 0 additions & 2 deletions src/shogun/evaluation/ClusteringEvaluation.cpp
Expand Up @@ -78,7 +78,5 @@ void CClusteringEvaluation::best_map(CLabels* predicted, CLabels* ground_truth)
for (int32_t i= 0; i < predicted_ilabels.vlen; ++i)
predicted->set_int_label(i, label_map[predicted_ilabels[i]]);

label_p.free_vector();
label_g.free_vector();
G.destroy_matrix();
}
2 changes: 0 additions & 2 deletions src/shogun/evaluation/ClusteringMutualInformation.cpp
Expand Up @@ -65,8 +65,6 @@ float64_t CClusteringMutualInformation::evaluate(CLabels* predicted, CLabels* gr
entropy_p += -G_colsum[i] * log(G_colsum[i])/log(2.);
}

label_p.free_vector();
label_g.free_vector();
G.destroy_matrix();

return mutual_info / std::max(entropy_g, entropy_p);
Expand Down
4 changes: 0 additions & 4 deletions src/shogun/evaluation/CrossValidation.cpp
Expand Up @@ -259,8 +259,6 @@ float64_t CCrossValidation::evaluate_one_run()

/* clean up */
SG_UNREF(result_labels);
inverse_subset_indices.destroy_vector();
subset_indices.destroy_vector();
}
}
else
Expand Down Expand Up @@ -304,8 +302,6 @@ float64_t CCrossValidation::evaluate_one_run()
/* clean up, remove subsets */
SG_UNREF(result_labels);
m_labels->remove_subset();
inverse_subset_indices.destroy_vector();
subset_indices.destroy_vector();
}
}

Expand Down
3 changes: 0 additions & 3 deletions src/shogun/evaluation/CrossValidationSplitting.cpp
Expand Up @@ -64,7 +64,4 @@ void CCrossValidationSplitting::build_subsets()
* elements, which happens if the number of class labels is not equal to
* the number of subsets */
m_subset_indices->shuffle();

/* clean up */
indices.destroy_vector();
}
1 change: 0 additions & 1 deletion src/shogun/evaluation/PRCEvaluation.cpp
Expand Up @@ -35,7 +35,6 @@ float64_t CPRCEvaluation::evaluate(CLabels* predicted, CLabels* ground_truth)
SGVector<float64_t> orig_labels = predicted->get_labels();
int32_t length = orig_labels.vlen;
float64_t* labels = CMath::clone_vector(orig_labels.vector, length);
orig_labels.free_vector();

// get indexes for sort
int32_t* idxs = SG_MALLOC(int32_t, length);
Expand Down
1 change: 0 additions & 1 deletion src/shogun/evaluation/ROCEvaluation.cpp
Expand Up @@ -40,7 +40,6 @@ float64_t CROCEvaluation::evaluate(CLabels* predicted, CLabels* ground_truth)
SGVector<float64_t> orig_labels = predicted->get_labels();
int32_t length = orig_labels.vlen;
float64_t* labels = CMath::clone_vector(orig_labels.vector, length);
orig_labels.free_vector();

// get sorted indexes
int32_t* idxs = SG_MALLOC(int32_t, length);
Expand Down
6 changes: 0 additions & 6 deletions src/shogun/evaluation/StratifiedCrossValidationSplitting.cpp
Expand Up @@ -49,9 +49,6 @@ CStratifiedCrossValidationSplitting::CStratifiedCrossValidationSplitting(
"subset!\n", labels_per_class.vector[i], classes.vector[i], num_subsets);
}
}

labels_per_class.destroy_vector();
classes.destroy_vector();
}

void CStratifiedCrossValidationSplitting::build_subsets()
Expand Down Expand Up @@ -116,7 +113,4 @@ void CStratifiedCrossValidationSplitting::build_subsets()
* elements, which happens if the number of class labels is not equal to
* the number of subsets */
m_subset_indices->shuffle();

/* clean up */
unique_labels.destroy_vector();
}
14 changes: 1 addition & 13 deletions src/shogun/lib/SGVector.h
Expand Up @@ -25,7 +25,7 @@ template<class T> class SGVector
/** Constructor wrapping an existing buffer.
 *
 * @param v pointer to the element buffer to wrap
 * @param len number of elements in v
 * @param ref_counting if true (default), a reference counter is
 * allocated so copies of this SGVector share ownership of the buffer;
 * if false, no counter is created and the caller keeps ownership
 */
SGVector(T* v, index_t len, bool ref_counting=true)
: vector(v), vlen(len), m_refcount(NULL)
{
if (ref_counting)
m_refcount=SG_CALLOC(int32_t, 1);
}

Expand Down Expand Up @@ -263,18 +263,6 @@ template<class T> class SGVector
return vector[index];
}

/** Release this vector's reference to the underlying buffer by
 * decrementing the reference count (presumably the buffer is freed
 * when the count reaches zero — see unref()). */
virtual void free_vector()
{
unref();
}

/** Legacy alias for free_vector(), kept for backwards compatibility;
 * does exactly the same thing. */
virtual void destroy_vector()
{
free_vector();
}

/** display array size */
void display_size() const
{
Expand Down
4 changes: 2 additions & 2 deletions src/shogun/machine/KernelMachine.cpp
Expand Up @@ -191,8 +191,8 @@ SGVector<float64_t> CKernelMachine::get_alphas()

bool CKernelMachine::create_new_model(int32_t num)
{
m_alpha.destroy_vector();
m_svs.destroy_vector();
m_alpha.unref();
m_svs.unref();

m_bias=0;

Expand Down

0 comments on commit 9d458c8

Please sign in to comment.