Commit 9a4214c

Merge branch 'master' of git://github.com/shogun-toolbox/shogun into refactoring

sabiroid committed Apr 14, 2012
2 parents da846bd + 1f77383
Showing 7 changed files with 42 additions and 18 deletions.
2 changes: 1 addition & 1 deletion src/configure
@@ -3833,7 +3833,7 @@ test_modular()
echocheck "SWIG"
if ${SWIG} -version >/dev/null 2>&1
then
-_swig_version=`swig -version 2>/dev/null | grep Version | cut -f 3 -d ' '`
+_swig_version=`${SWIG} -version 2>/dev/null | grep Version | cut -f 3 -d ' '`
if assert_version swig $_swig $_swig_version 2.0.4; then
echores "$_swig_version"
_swig=yes
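The fix above matters whenever SWIG is not the binary named `swig` on the PATH: the version probe now queries the same ${SWIG} executable that the rest of the script tests, so assert_version compares the version of the interpreter that will actually be used against the 2.0.4 minimum. For illustration, here is a minimal C++ sketch of this kind of dotted-version comparison (an analogue of the configure helper, not the helper itself):

    #include <cstdio>

    // Compare dotted version strings like "2.0.4"; missing components
    // count as zero. Returns true when `found` is at least `required`.
    static bool version_at_least(const char* found, const char* required)
    {
        int f[3] = {0, 0, 0}, r[3] = {0, 0, 0};
        std::sscanf(found, "%d.%d.%d", &f[0], &f[1], &f[2]);
        std::sscanf(required, "%d.%d.%d", &r[0], &r[1], &r[2]);
        for (int i = 0; i < 3; i++)
        {
            if (f[i] != r[i])
                return f[i] > r[i];
        }
        return true; // equal versions pass
    }

    int main()
    {
        std::printf("%s\n", version_at_least("2.0.4", "2.0.4") ? "ok" : "too old");
        return 0;
    }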
2 changes: 2 additions & 0 deletions src/interfaces/modular/Features.i
@@ -47,6 +47,7 @@
%rename(LBPPyrDotFeatures) CLBPPyrDotFeatures;
%rename(ExplicitSpecFeatures) CExplicitSpecFeatures;
%rename(ImplicitWeightedSpecFeatures) CImplicitWeightedSpecFeatures;
+%rename(Subset) CSubset;

/* Include Class Headers to make them visible from within the target language */
%include <shogun/features/FeatureTypes.h>
@@ -372,3 +373,4 @@ namespace shogun
%include <shogun/features/LBPPyrDotFeatures.h>
%include <shogun/features/ExplicitSpecFeatures.h>
%include <shogun/features/ImplicitWeightedSpecFeatures.h>
+%include <shogun/features/Subset.h>
1 change: 1 addition & 0 deletions src/interfaces/modular/Features_includes.i
@@ -32,4 +32,5 @@
#include <shogun/features/LBPPyrDotFeatures.h>
#include <shogun/features/ExplicitSpecFeatures.h>
#include <shogun/features/ImplicitWeightedSpecFeatures.h>
+#include <shogun/features/Subset.h>
%}
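Together with the Features.i changes above, this completes the usual three-step pattern for exposing a new class through the modular interfaces: %rename registers CSubset under the SWIG-visible name Subset, the #include inside the %{ %} block makes the declaration available to the generated wrapper code, and the %include directive tells SWIG to wrap the header's contents.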
41 changes: 31 additions & 10 deletions src/shogun/classifier/GaussianNaiveBayes.cpp
@@ -126,6 +126,13 @@ bool CGaussianNaiveBayes::train(CFeatures* data)
m_label_prob.zero();
m_rates.zero();

+// number of iterations in all cycles
+int32_t max_progress = 2 * train_labels.vlen + 2 * m_num_classes;
+
+// current progress
+int32_t progress = 0;
+SG_PROGRESS(progress, 0, max_progress);

// get sum of features among labels
for (i=0; i<train_labels.vlen; i++)
{
@@ -135,13 +142,19 @@ bool CGaussianNaiveBayes::train(CFeatures* data)
fea.free_vector();

m_label_prob.vector[train_labels.vector[i]]+=1.0;

+progress++;
+SG_PROGRESS(progress, 0, max_progress);
}

// get means of features of labels
for (i=0; i<m_num_classes; i++)
{
for (j=0; j<m_dim; j++)
m_means(j, i) /= m_label_prob.vector[i];

+progress++;
+SG_PROGRESS(progress, 0, max_progress);
}

// compute squared residuals with means available
@@ -154,20 +167,24 @@ bool CGaussianNaiveBayes::train(CFeatures* data)
CMath::sq(fea[j]-m_means(j, train_labels.vector[i]));
}
fea.free_vector();
-}
+
+progress++;
+SG_PROGRESS(progress, 0, max_progress);
+}

// get variance of features of labels
for (i=0; i<m_num_classes; i++)
{
for (j=0; j<m_dim; j++)
m_variances(j, i) /= m_label_prob.vector[i] > 1 ? m_label_prob.vector[i]-1 : 1;
-}
-
-// get a priori probabilities of labels
-for (i=0; i<m_num_classes; i++)
-{
+
+// get a priori probabilities of labels
m_label_prob.vector[i]/= m_num_classes;

+progress++;
+SG_PROGRESS(progress, 0, max_progress);
}
+SG_DONE();

train_labels.free_vector();

@@ -177,15 +194,19 @@ bool CGaussianNaiveBayes::train(CFeatures* data)
CLabels* CGaussianNaiveBayes::apply()
{
// init number of vectors
-int32_t n = m_features->get_num_vectors();
+int32_t num_vectors = m_features->get_num_vectors();

// init result labels
-CLabels* result = new CLabels(n);
+CLabels* result = new CLabels(num_vectors);

// classify each example of data
-for (int i=0; i<n; i++)
+SG_PROGRESS(0, 0, num_vectors);
+for (int i = 0; i < num_vectors; i++)
+{
result->set_label(i,apply(i));

+SG_PROGRESS(i + 1, 0, num_vectors);
+}
+SG_DONE();
return result;
};

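Two things happen in this file. First, training now reports progress: max_progress counts one tick per training vector in each of the two passes over the data (feature sums, then squared residuals) plus one tick per class in each of the two passes over the classes (means, then the merged variance/prior loop), i.e. 2 * train_labels.vlen + 2 * m_num_classes ticks in total. Second, the variance-normalization and prior loops are merged into one loop over classes; within each iteration the prior division must come after the variance division, because the variance update still needs the raw class count stored in m_label_prob.vector[i]. A minimal stand-alone sketch of the counter pattern (plain C++ with printf standing in for the SG_PROGRESS/SG_DONE macros, not the Shogun implementation):

    #include <cstdio>

    // Stand-in for SG_PROGRESS(current, 0, max): print a percentage,
    // with the final newline playing the role of SG_DONE().
    static void report_progress(int current, int max)
    {
        std::printf("\rprogress: %5.1f%%", 100.0 * current / max);
        if (current == max)
            std::printf("\n");
    }

    int main()
    {
        const int num_vectors = 100, num_classes = 4;
        // One tick per vector in each of the two passes over the data,
        // one tick per class in each of the two passes over the classes.
        const int max_progress = 2 * num_vectors + 2 * num_classes;

        int progress = 0;
        for (int pass = 0; pass < 2; pass++)
            for (int i = 0; i < num_vectors; i++)
                report_progress(++progress, max_progress);
        for (int pass = 0; pass < 2; pass++)
            for (int c = 0; c < num_classes; c++)
                report_progress(++progress, max_progress);
        return 0;
    }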
2 changes: 1 addition & 1 deletion src/shogun/preprocessor/PNorm.cpp
@@ -117,7 +117,7 @@ void CPNorm::register_param ()

inline float64_t CPNorm::get_pnorm (float64_t* vec, int32_t vec_len) const
{
-float64_t norm;
+float64_t norm = 0.0;
if (m_p == 1.0)
{
for (int i = 0; i < vec_len; ++i)
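This one-line change fixes a read of an uninitialized variable: every branch of get_pnorm only accumulates into norm with +=, so without the = 0.0 the result starts from garbage (formally undefined behavior in C++). Only the p == 1 branch is visible in the hunk; the sketch below reconstructs the whole computation from the usual p-norm definition, so the 2-norm and general-p branches are assumptions rather than a copy of the Shogun source:

    #include <cmath>
    #include <cstdio>

    // Minimal sketch of a p-norm over a raw buffer, mirroring CPNorm::get_pnorm:
    // the accumulator must start at zero, since every branch only adds to it.
    static double get_pnorm(const double* vec, int len, double p)
    {
        double norm = 0.0;
        if (p == 1.0)
        {
            for (int i = 0; i < len; i++)
                norm += std::fabs(vec[i]); // 1-norm: sum of absolute values
        }
        else if (p == 2.0)
        {
            for (int i = 0; i < len; i++)
                norm += vec[i] * vec[i];
            norm = std::sqrt(norm); // 2-norm: Euclidean length
        }
        else
        {
            for (int i = 0; i < len; i++)
                norm += std::pow(std::fabs(vec[i]), p);
            norm = std::pow(norm, 1.0 / p); // general case
        }
        return norm;
    }

    int main()
    {
        const double v[] = {3.0, -4.0};
        std::printf("p=1: %g, p=2: %g\n", get_pnorm(v, 2, 1.0), get_pnorm(v, 2, 2.0));
        return 0;
    }

For v = (3, -4) this prints p=1: 7, p=2: 5.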
7 changes: 5 additions & 2 deletions src/shogun/preprocessor/PNorm.h
@@ -34,6 +34,9 @@ class CPNorm : public CSimplePreprocessor<float64_t>
/** default PNorm Constructor */
CPNorm ();

+/** constructor
+ * @param p the norm to calculate. NOTE: has to be greater than or equal to 1.0
+ */
CPNorm (double p);

/** destructor */
@@ -65,12 +68,12 @@ class CPNorm : public CSimplePreprocessor<float64_t>

/**
* Set norm
- * @param p norm value
+ * @param pnorm norm value
*/
void set_pnorm (double pnorm);

/**
- * Get norm
+ * Get norm value
* @return norm
*/
double get_pnorm () const;
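The renamed @param now matches the setter's actual argument name. A short usage sketch built only from the members shown above (the init_shogun/SG_UNREF scaffolding is the era's usual boilerplate and is assumed here, not part of this diff):

    #include <shogun/base/init.h>
    #include <shogun/preprocessor/PNorm.h>

    using namespace shogun;

    int main()
    {
        init_shogun();                     // set up Shogun's globals
        CPNorm* preproc = new CPNorm(2.0); // Euclidean norm to start with
        preproc->set_pnorm(3.0);           // must stay >= 1.0
        double p = preproc->get_pnorm();   // reads back 3.0
        SG_UNREF(preproc);                 // Shogun objects are refcounted
        exit_shogun();
        return p == 3.0 ? 0 : 1;
    }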
5 changes: 1 addition & 4 deletions src/shogun/regression/GaussianProcessRegression.cpp
@@ -218,14 +218,11 @@ SGMatrix<float64_t> CGaussianProcessRegression::getCovarianceMatrix(CFeatures* d
//Indices used to solve Lv=K(X_test, X_train) for v
SGVector< int32_t > ipiv(CMath::min(m_L.num_rows, m_L.num_cols));

-int info;
-
-
memcpy(temp1.matrix, kernel_test_matrix.matrix,
kernel_test_matrix.num_cols*kernel_test_matrix.num_rows*sizeof(float64_t));

//Get indices used to solve Lv=K(X_test, X_train) for v
-dgetrf_(&m_L.num_rows, &m_L.num_cols, m_L.matrix, &m_L.num_cols, ipiv.vector, &info);
+clapack_dgetrf(CblasColMajor, m_L.num_rows, m_L.num_cols, m_L.matrix, m_L.num_cols, ipiv.vector);

//Solve Lv=K(X_test, X_train) for v
clapack_dgetrs(CblasColMajor, CblasNoTrans,
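The replacement drops the raw Fortran entry point dgetrf_, which needs pointer arguments and a separate info out-parameter (hence the deleted int info;), in favor of the ATLAS-style C wrapper clapack_dgetrf, matching the clapack_dgetrs call that follows. A minimal stand-alone sketch of the same factor-then-solve sequence, assuming the ATLAS clapack interface (header name and enum location vary between LAPACK distributions):

    #include <cstdio>
    extern "C" {
    #include <clapack.h> // ATLAS C interface to LAPACK
    }

    int main()
    {
        // Solve A x = b by LU factorization: dgetrf factors A in place,
        // then dgetrs back-substitutes using the recorded row pivots.
        double A[4] = {4.0, 1.0, 2.0, 3.0}; // column-major 2x2: [[4,2],[1,3]]
        double b[2] = {10.0, 5.0};          // expected solution: x = [2, 1]
        int ipiv[2];

        int info = clapack_dgetrf(CblasColMajor, 2, 2, A, 2, ipiv);
        if (info != 0)
            return 1; // singular matrix

        info = clapack_dgetrs(CblasColMajor, CblasNoTrans, 2, 1, A, 2, ipiv, b, 2);
        if (info != 0)
            return 1;

        std::printf("x = [%g, %g]\n", b[0], b[1]);
        return 0;
    }

With A = [[4, 2], [1, 3]] and b = (10, 5) it prints x = [2, 1].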
