Skip to content

Commit

Permalink
Merge pull request #738 from karlnapf/master
Browse files Browse the repository at this point in the history
memory bug fix
  • Loading branch information
karlnapf committed Aug 18, 2012
2 parents 38642f1 + 8e8fff9 commit 7cfee21
Show file tree
Hide file tree
Showing 2 changed files with 41 additions and 5 deletions.
Expand Up @@ -113,6 +113,22 @@ void test_mkl_cross_validation()
CStatistics::variance(weights, false).display_vector("variance per kernel");
CStatistics::std_deviation(weights, false).display_vector("std-dev per kernel");

SG_UNREF(result);

/* again for two runs */
cross->set_num_runs(2);
result=cross->evaluate();

/* print mkl weights */
weights=mkl_storage->get_mkl_weights();
weights.display_matrix("mkl weights");

/* print mean and variance of each kernel weight. These could, for example,
 * be used to compute confidence intervals */
CStatistics::mean(weights, false).display_vector("mean per kernel");
CStatistics::variance(weights, false).display_vector("variance per kernel");
CStatistics::std_deviation(weights, false).display_vector("std-dev per kernel");

/* clean up */
SG_UNREF(result);
SG_UNREF(cross);
Expand Down
30 changes: 25 additions & 5 deletions src/shogun/evaluation/CrossValidationMKLStorage.cpp
Expand Up @@ -30,19 +30,39 @@ void CCrossValidationMKLStorage::update_trained_machine(

SGVector<float64_t> w=kernel->get_subkernel_weights();

/* possibly re-allocate memory (different number of runs from evaluation before) */
if (m_mkl_weights.num_rows!=w.vlen ||
m_mkl_weights.num_cols!=m_num_folds*m_num_runs)
{
if (m_mkl_weights.matrix)
{
SG_DEBUG("deleting memory for mkl weight matrix\n");
m_mkl_weights=SGMatrix<float64_t>();
}
}

/* possibly allocate memory (first call) */
if (!m_mkl_weights.matrix)
{
SG_PRINT("allocating memory for mkl weight matrix\n");
m_mkl_weights=SGMatrix<float64_t>(w.vlen, m_num_folds*m_num_runs);
SG_DEBUG("allocating memory for mkl weight matrix\n");
m_mkl_weights=SGMatrix<float64_t>(w.vlen,m_num_folds*m_num_runs);
}

/* put current mkl weights into matrix, copy memory vector wise to make
* things fast */
index_t n=m_current_run_index*m_current_fold_index;
index_t first_idx=n*w.vlen+m_current_fold_index*w.vlen;
* things fast. Compute index of address to where vector goes */

/* number of runs is w.vlen*m_num_folds shift */
index_t run_shift=m_current_run_index*w.vlen*m_num_folds;

/* fold shift is m_current_fold_index*w.vlen */
index_t fold_shift=m_current_fold_index*w.vlen;

/* add both index shifts */
index_t first_idx=run_shift+fold_shift;
SG_DEBUG("run %d, fold %d, matrix index %d\n",m_current_run_index,
m_current_fold_index, first_idx);

/* copy memory */
memcpy(&m_mkl_weights.matrix[first_idx], w.vector,
w.vlen*sizeof(float64_t));

Expand Down

0 comments on commit 7cfee21

Please sign in to comment.