
Commit 17ea108

Merge pull request #404 from shelhamer/whitespace-cleanup
Trailing whitespace cleanup
Soeren Sonnenburg committed Mar 30, 2012
2 parents 25f5321 + b763e52 commit 17ea108
Showing 336 changed files with 2,718 additions and 2,718 deletions.
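
A tree-wide cleanup like this is normally scripted rather than edited by hand. A minimal sketch of one way to strip trailing whitespace from the sources (illustrative only; the PR does not record the exact tooling used, and the src/ path and file filter here are assumptions):

import pathlib

# Strip trailing whitespace from C/C++ sources under src/ (hypothetical helper).
for path in pathlib.Path("src").rglob("*"):
    if not path.is_file() or path.suffix not in {".h", ".cpp"}:
        continue
    text = path.read_text()
    cleaned = "\n".join(line.rstrip() for line in text.splitlines())
    if text.endswith("\n"):
        cleaned += "\n"  # keep the file's final newline
    if cleaned != text:
        path.write_text(cleaned)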
2 changes: 1 addition & 1 deletion src/shogun/base/DynArray.h
@@ -289,7 +289,7 @@ template <class T> class DynArray
*resize_granularity;

T* p;

if (use_sg_mallocs)
p = SG_REALLOC(T, array, new_num_elements);
else
4 changes: 2 additions & 2 deletions src/shogun/base/Parallel.h
@@ -57,11 +57,11 @@ class Parallel
int32_t get_num_cpus() const;

/** set number of threads
- * @param n number of threads
+ * @param n number of threads
*/
void set_num_threads(int32_t n);

- /** get number of threads
+ /** get number of threads
* @return number of threads
*/
int32_t get_num_threads() const;
6 changes: 3 additions & 3 deletions src/shogun/base/Parameter.h
@@ -26,7 +26,7 @@ struct TParameter
* @param parameter pointer to parameter
* @param name name of parameter
* @param description description of parameter
- */
+ */
explicit TParameter(const TSGDataType* datatype, void* parameter,
const char* name, const char* description);

@@ -122,8 +122,8 @@ struct TParameter
const char* prefix);
};

- /** @brief Parameter class
- *
+ /** @brief Parameter class
+ *
* Must not be an CSGObject to prevent a recursive call of
* constructors.
*/
4 changes: 2 additions & 2 deletions src/shogun/base/Version.cpp
@@ -111,7 +111,7 @@ int64_t Version::get_version_in_minutes()
return ((((version_year)*12 + version_month)*30 + version_day)* 24 + version_hour)*60 + version_minute;
}

- /** ref object
+ /** ref object
* @return ref count
*/
int32_t Version::ref()
@@ -121,7 +121,7 @@ int32_t Version::ref()
}

/** ref count
- * @return ref count
+ * @return ref count
*/
int32_t Version::ref_count() const
{
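
The get_version_in_minutes() context above flattens the build timestamp into minutes, treating every month as 30 days. The same arithmetic restated as a quick sanity check (a sketch, not shogun code):

def version_in_minutes(year, month, day, hour, minute):
    # Mirrors Version::get_version_in_minutes(): a month counts as 30 days.
    return (((year * 12 + month) * 30 + day) * 24 + hour) * 60 + minute

assert version_in_minutes(0, 0, 0, 0, 1) == 1                  # one minute
assert version_in_minutes(0, 0, 1, 0, 0) == 24 * 60            # one day
assert version_in_minutes(1, 0, 0, 0, 0) == 12 * 30 * 24 * 60  # one "year"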
4 changes: 2 additions & 2 deletions src/shogun/base/Version.h
@@ -69,13 +69,13 @@ class Version
/** get version in minutes */
static int64_t get_version_in_minutes();

- /** ref object
+ /** ref object
* @return ref count
*/
int32_t ref();

/** ref count
- * @return ref count
+ * @return ref count
*/
int32_t ref_count() const;

14 changes: 7 additions & 7 deletions src/shogun/classifier/ConjugateIndex.cpp
@@ -33,7 +33,7 @@ CConjugateIndex::CConjugateIndex(CFeatures* train_features, CLabels* train_label
m_classes = NULL;
};

- CConjugateIndex::~CConjugateIndex()
+ CConjugateIndex::~CConjugateIndex()
{
clean_classes();
m_feature_vector.destroy_vector();
@@ -65,7 +65,7 @@ void CConjugateIndex::clean_classes()
}
}

- bool CConjugateIndex::train(CFeatures* train_features)
+ bool CConjugateIndex::train(CFeatures* train_features)
{
if (train_features)
set_features(train_features);
@@ -85,7 +85,7 @@ bool CConjugateIndex::train(CFeatures* train_features)
m_feature_vector = SGVector<float64_t>(num_features);

SG_PROGRESS(0,0,m_num_classes-1);

for (int32_t label=0; label<m_num_classes; label++)
{
int32_t count = 0;
@@ -124,13 +124,13 @@ bool CConjugateIndex::train(CFeatures* train_features)
1.0,matrix.matrix,count,
class_feature_matrix.matrix,num_features,
0.0,helper_matrix.matrix,count);

cblas_dgemm(CblasColMajor,CblasNoTrans,CblasNoTrans,
num_features,num_features,count,
1.0,class_feature_matrix.matrix,num_features,
helper_matrix.matrix,count,
0.0,m_classes[label].matrix,num_features);

SG_PROGRESS(label+1,0,m_num_classes);
helper_matrix.destroy_matrix();
class_feature_matrix.destroy_matrix();
@@ -141,7 +141,7 @@ bool CConjugateIndex::train(CFeatures* train_features)
return true;
};

- CLabels* CConjugateIndex::apply(CFeatures* test_features)
+ CLabels* CConjugateIndex::apply(CFeatures* test_features)
{
set_features(test_features);

@@ -187,7 +187,7 @@ float64_t CConjugateIndex::conjugate_index(SGVector<float64_t> feature_vector, i
return product/norm;
};

- float64_t CConjugateIndex::apply(int32_t index)
+ float64_t CConjugateIndex::apply(int32_t index)
{
int32_t predicted_label = 0;
float64_t max_conjugate_index = 0.0;
4 changes: 2 additions & 2 deletions src/shogun/classifier/ConjugateIndex.h
@@ -15,7 +15,7 @@
#include <shogun/mathematics/Math.h>
#include <shogun/features/SimpleFeatures.h>

- namespace shogun
+ namespace shogun
{

class CLabels;
@@ -29,7 +29,7 @@ class CFeatures;
* Building of classifiers based on conjugation indices
*
* Currently supports only multiclass problems.
- * Useless for datasets with # of dimensions less than # of class vectors.
+ * Useless for datasets with # of dimensions less than # of class vectors.
*/
class CConjugateIndex : public CMachine
{
2 changes: 1 addition & 1 deletion src/shogun/classifier/LDA.cpp
@@ -124,7 +124,7 @@ bool CLDA::train_machine(CFeatures* data)
}
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, nf, nf,
(int) num_neg, 1.0, buffer, nf, buffer, nf, 0, scatter, nf);

//mean pos
for (i=0; i<num_pos; i++)
{
2 changes: 1 addition & 1 deletion src/shogun/classifier/LPBoost.cpp
@@ -173,7 +173,7 @@ bool CLPBoost::train_machine(CFeatures* data)
solver.cleanup();

cleanup();

return true;
}
#endif
4 changes: 2 additions & 2 deletions src/shogun/classifier/PluginEstimate.cpp
@@ -89,7 +89,7 @@ bool CPluginEstimate::train_machine(CFeatures* data)

SG_FREE(pos_indizes);
SG_FREE(neg_indizes);

return true;
}

@@ -130,7 +130,7 @@ float64_t CPluginEstimate::apply(int32_t vec_idx)

if ((!pos_model) || (!neg_model))
SG_ERROR( "model(s) not assigned\n");

float64_t result=pos_model->get_log_likelihood_example(vector, len) - neg_model->get_log_likelihood_example(vector, len);
features->free_feature_vector(vector, vec_idx, free_vec);
return result;
2 changes: 1 addition & 1 deletion src/shogun/classifier/PluginEstimate.h
@@ -187,7 +187,7 @@ class CPluginEstimate: public CMachine
{
return pos_model->get_num_model_parameters()+neg_model->get_num_model_parameters();
}

/** check models
*
* @return if one of the two models is invalid
10 changes: 5 additions & 5 deletions src/shogun/classifier/QDA.cpp
@@ -102,8 +102,8 @@ CLabels* CQDA::apply()

}

- cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, num_vecs, m_dim,
- m_dim, 1.0, X.matrix, num_vecs, m_M.get_matrix(k), m_dim, 0.0,
+ cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, num_vecs, m_dim,
+ m_dim, 1.0, X.matrix, num_vecs, m_M.get_matrix(k), m_dim, 0.0,
A.matrix, num_vecs);

for ( i = 0 ; i < num_vecs ; ++i )
@@ -248,7 +248,7 @@ bool CQDA::train_machine(CFeatures* data)

rf->free_feature_vector(vec, class_idxs[k*num_vec + i], vfree);
}

for ( j = 0 ; j < m_dim ; ++j )
m_means[k*m_dim + j] /= class_nums[k];

@@ -264,7 +264,7 @@ bool CQDA::train_machine(CFeatures* data)
float64_t * col = scalings.get_column_vector(k);
float64_t * rot_mat = rotations.get_matrix(k);

- wrap_dgesvd(jobu, jobvt, m, n, buffer.matrix, lda, col, NULL, ldu,
+ wrap_dgesvd(jobu, jobvt, m, n, buffer.matrix, lda, col, NULL, ldu,
rot_mat, ldvt, &info);
ASSERT(info == 0);
buffer.destroy_matrix();
@@ -281,7 +281,7 @@ bool CQDA::train_machine(CFeatures* data)
for ( i = 0 ; i < m_dim ; ++i )
for ( j = 0 ; j < m_dim ; ++j )
M[i + j*m_dim] *= scalings[k*m_dim + j];

cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, n, n, n, 1.0,
M.matrix, n, rot_mat, n, 0.0, m_covs.get_matrix(k), n);

8 changes: 4 additions & 4 deletions src/shogun/classifier/QDA.h
@@ -125,7 +125,7 @@ class CQDA : public CMachine
*
* @return mean vector of class i
*/
- inline SGVector< float64_t > get_mean(int32_t i) const
+ inline SGVector< float64_t > get_mean(int32_t i) const
{
return SGVector< float64_t >(m_means.get_column_vector(i), m_dim);
}
@@ -136,9 +136,9 @@ class CQDA : public CMachine
*
* @return covariance matrix of class i
*/
- inline SGMatrix< float64_t > get_cov(int32_t i) const
- {
- return SGMatrix< float64_t >(m_covs.get_matrix(i), m_dim, m_dim);
+ inline SGMatrix< float64_t > get_cov(int32_t i) const
+ {
+ return SGMatrix< float64_t >(m_covs.get_matrix(i), m_dim, m_dim);
}

protected:
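
The get_mean()/get_cov() accessors above expose the per-class Gaussian parameters that QDA estimates. The decision rule built from them is the standard Gaussian log-discriminant; a hedged numpy sketch (not shogun's implementation):

import numpy as np

def qda_score(x, mean, cov, log_prior=0.0):
    # Log discriminant of one class, up to a constant shared by all classes:
    # -1/2*log|cov| - 1/2*(x-mean)^T cov^{-1} (x-mean) + log(prior).
    diff = x - mean
    _, logdet = np.linalg.slogdet(cov)
    return -0.5 * logdet - 0.5 * diff @ np.linalg.solve(cov, diff) + log_prior

# Prediction picks the class whose qda_score is largest.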
6 changes: 3 additions & 3 deletions src/shogun/classifier/SubGradientLPM.cpp
@@ -263,7 +263,7 @@ float64_t CSubGradientLPM::line_search(int32_t num_feat, int32_t num_vec)
{
float64_t p=get_label(i-num_feat)*(features->dense_dot(i-num_feat, grad_w, num_feat)+grad_b);
grad_proj[i-num_feat]=p;

A[i]=0;
B[i]=0;
C[i]=C1*p;
@@ -411,7 +411,7 @@ float64_t CSubGradientLPM::compute_min_subgradient(
float64_t CSubGradientLPM::compute_objective(int32_t num_feat, int32_t num_vec)
{
float64_t result= CMath::sum_abs(w, num_feat);

for (int32_t i=0; i<num_vec; i++)
{
if (proj[i]<1.0)
@@ -601,7 +601,7 @@ bool CSubGradientLPM::train_machine(CFeatures* data)
//SG_PRINT("sum_Cy_active: %f\n", sum_Cy_active);
//CMath::display_vector(grad_w, num_feat, "grad_w");
//SG_PRINT("grad_b:%f\n", grad_b);

dir_deriv=compute_min_subgradient(num_feat, num_vec, num_active, num_bound);

alpha=line_search(num_feat, num_vec);
8 changes: 4 additions & 4 deletions src/shogun/classifier/mkl/MKL.cpp
@@ -319,7 +319,7 @@ bool CMKL::train_machine(CFeatures* data)
{
float64_t* sumw = SG_MALLOC(float64_t, num_kernels);



while (true)
{
@@ -367,15 +367,15 @@ void CMKL::set_mkl_norm(float64_t norm)

if (norm<1)
SG_ERROR("Norm must be >= 1, e.g., 1-norm is the standard MKL; norms>1 nonsparse MKL\n");

mkl_norm = norm;
}

void CMKL::set_elasticnet_lambda(float64_t lambda)
{
if (lambda>1 || lambda<0)
SG_ERROR("0<=lambda<=1\n");

if (lambda==0)
lambda = 1e-6;
else if (lambda==1.0)
@@ -388,7 +388,7 @@ void CMKL::set_mkl_block_norm(float64_t q)
{
if (q<1)
SG_ERROR("1<=q<=inf\n");

mkl_block_norm=q;
}

10 changes: 5 additions & 5 deletions src/shogun/classifier/mkl/MKL.h
@@ -30,7 +30,7 @@ extern "C" {
namespace shogun
{
/** @brief Multiple Kernel Learning
- *
+ *
* A support vector machine based method for use with multiple kernels. In
* Multiple Kernel Learning (MKL) in addition to the SVM \f$\bf\alpha\f$ and
* bias term \f$b\f$ the kernel weights \f$\bf\beta\f$ are estimated in
@@ -48,7 +48,7 @@ namespace shogun
*
* Kernels have to be chosen a-priori. In MKL \f$\alpha_i,\;\beta\f$ and bias are determined
* by solving the following optimization program
- *
+ *
* \f{eqnarray*}
* \mbox{min} && \gamma-\sum_{i=1}^N\alpha_i\\
* \mbox{w.r.t.} && \gamma\in R, \alpha\in R^N \nonumber\\
@@ -88,7 +88,7 @@ namespace shogun
* \mbox{w.r.t.} && f_1\in\mathcal{H}_1,f_2\in\mathcal{H}_2,\ldots,f_K\in\mathcal{H}_K,\,b\in R \nonumber\\
* \f}
* where \f$\ell\f$ is a loss function. Here \f$\lambda\f$ controls the trade-off between the two regularization terms. \f$\lambda=0\f$ corresponds to \f$L_1\f$-MKL, whereas \f$\lambda=1\f$ corresponds to the uniform-weighted combination of kernels (\f$L_\infty\f$-MKL). This approach was studied by Shawe-Taylor (2008) "Kernel Learning for Novelty Detection" (NIPS MKL Workshop 2008) and Tomioka & Suzuki (2009) "Sparsity-accuracy trade-off in MKL" (NIPS MKL Workshop 2009).
- *
+ *
*/
class CMKL : public CSVM
{
@@ -199,7 +199,7 @@ class CMKL : public CSVM
* @return computed dual objective
*/
float64_t compute_elasticnet_dual_objective();

/** set mkl epsilon (optimization accuracy for kernel weights)
*
* @param eps new weight_epsilon
@@ -452,7 +452,7 @@ class CMKL : public CSVM
/** norm used in mkl must be > 0 */
float64_t mkl_norm;
/** Sparsity trade-off parameter used in ElasticnetMKL
- must be 0<=lambda<=1
+ must be 0<=lambda<=1
lambda=0: L1-MKL
lambda=1: Linfinity-MKL
*/
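
The MKL.h docstring above learns the kernel weights beta jointly with the SVM parameters; the combined kernel itself is just a weighted sum of base Gram matrices. A minimal sketch of that combination step (names are hypothetical; this is not shogun's API):

import numpy as np

def combine_kernels(grams, beta):
    # grams: list of (n x n) Gram matrices, one per base kernel.
    # beta: kernel weights from an MKL solver; uniform weights give the
    # lambda=1 (L-infinity) special case mentioned in the docstring.
    return sum(b * K for b, K in zip(beta, grams))

K1 = np.eye(3)        # e.g. Gram matrix of one base kernel
K2 = np.ones((3, 3))  # e.g. Gram matrix of another
K = combine_kernels([K1, K2], [0.5, 0.5])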
2 changes: 1 addition & 1 deletion src/shogun/classifier/mkl/MKLClassification.h
@@ -16,7 +16,7 @@
namespace shogun
{
/** @brief Multiple Kernel Learning for two-class-classification
- *
+ *
* Learns an SVM classifier and its kernel weights. Makes only sense if
* multiple kernels are used.
*
