Merge pull request #437 from karlnapf/master
memory bugfix, initialization, comments and debug messages
karlnapf committed Apr 14, 2012
2 parents 277ba68 + 24bab87 commit 2f00592
Showing 3 changed files with 18 additions and 10 deletions.
20 changes: 12 additions & 8 deletions src/shogun/classifier/mkl/MKL.cpp
@@ -18,10 +18,9 @@
 
 using namespace shogun;
 
-CMKL::CMKL(CSVM* s)
-    : CSVM(), svm(NULL), C_mkl(0), mkl_norm(1), ent_lambda(0), beta_local(NULL),
-    mkl_iterations(0), mkl_epsilon(1e-5), interleaved_optimization(true),
-    w_gap(1.0), rho(0)
+CMKL::CMKL(CSVM* s) : CSVM(), svm(NULL), C_mkl(0), mkl_norm(1), ent_lambda(0),
+    mkl_block_norm(1),beta_local(NULL), mkl_iterations(0), mkl_epsilon(1e-5),
+    interleaved_optimization(true), w_gap(1.0), rho(0)
 {
     set_constraint_generator(s);
 #ifdef USE_CPLEX
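The constructor change above adds mkl_block_norm to the initializer list so it no longer starts out with an indeterminate value, which is the "initialization" part of the commit message. A minimal standalone illustration of why that matters, using a hypothetical Toy class rather than CMKL:

#include <cstdio>

struct Toy
{
    double initialized;
    double forgotten;              /* analogous to mkl_block_norm before the fix */

    Toy() : initialized(1.0) {}    /* 'forgotten' is left indeterminate */
};

int main()
{
    Toy t;
    printf("%f\n", t.initialized); /* prints 1.000000 */
    printf("%f\n", t.forgotten);   /* garbage value; reading it is formally undefined */
    return 0;
}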
@@ -1474,22 +1473,24 @@ void CMKL::compute_sum_beta(float64_t* sumw)
 
     int32_t nsv=svm->get_num_support_vectors();
     int32_t num_kernels = kernel->get_num_subkernels();
-    float64_t* beta = SG_MALLOC(float64_t, num_kernels);
+    SGVector<float64_t> beta=SGVector<float64_t>(num_kernels, true);
     int32_t nweights=0;
     const float64_t* old_beta = kernel->get_subkernel_weights(nweights);
     ASSERT(nweights==num_kernels);
     ASSERT(old_beta);
 
     for (int32_t i=0; i<num_kernels; i++)
     {
-        beta[i]=0;
+        beta.vector[i]=0;
         sumw[i]=0;
     }
 
     for (int32_t n=0; n<num_kernels; n++)
     {
-        beta[n]=1.0;
-        kernel->set_subkernel_weights(SGVector<float64_t>(beta, num_kernels));
+        beta.vector[n]=1.0;
+        /* this only copies the value of the first entry of this array
+         * so it may be freed safely afterwards. */
+        kernel->set_subkernel_weights(beta);
 
         for (int32_t i=0; i<nsv; i++)
         {
@@ -1506,6 +1507,9 @@ void CMKL::compute_sum_beta(float64_t* sumw)
 
     mkl_iterations++;
     kernel->set_subkernel_weights(SGVector<float64_t>( (float64_t*) old_beta, num_kernels));
+
+    /* safe because of above comment, otherwise: memleak */
+    beta.free_vector();
 }


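The added free_vector() call and the "otherwise: memleak" comment address the memory bugfix from the commit message: the SG_MALLOC'd beta buffer in compute_sum_beta() was previously never released. Per the new comment, set_subkernel_weights() only copies values out of the array, so the caller may release the reference-counted SGVector once at the end. A minimal, self-contained sketch of that ownership pattern follows; RefVector and the stand-in set_subkernel_weights() are toy illustrations, not the real SGVector / CKernel API:

#include <cstdio>
#include <cstdlib>

/* toy reference-counted vector, loosely modelled on the pattern in the fix */
struct RefVector
{
    double* vector;   /* payload, like SGVector<float64_t>::vector */
    int* refcount;    /* shared use count */
    int vlen;

    RefVector(int len, bool ref_counting)
    : vector((double*) calloc(len, sizeof(double))),
      refcount(ref_counting ? new int(1) : NULL), vlen(len) {}

    RefVector(const RefVector& other)
    : vector(other.vector), refcount(other.refcount), vlen(other.vlen)
    {
        if (refcount)
            ++*refcount;                /* another holder of the same buffer */
    }

    void free_vector()
    {
        if (refcount && --*refcount==0)
        {
            free(vector);
            delete refcount;
            printf("buffer released\n");
        }
    }
};

/* stand-in for CKernel::set_subkernel_weights(): copies the values it needs
 * and keeps no reference to the caller's buffer */
void set_subkernel_weights(RefVector weights)
{
    printf("first weight: %f\n", weights.vector[0]);
    weights.free_vector();              /* drop the callee's reference */
}

int main()
{
    int num_kernels=3;
    RefVector beta(num_kernels, true);  /* was: float64_t* beta=SG_MALLOC(...) */

    for (int n=0; n<num_kernels; n++)
    {
        for (int i=0; i<num_kernels; i++)
            beta.vector[i]=0;
        beta.vector[n]=1.0;
        set_subkernel_weights(beta);    /* callee copies, so freeing later is safe */
    }

    beta.free_vector();                 /* the call that was missing; without it the buffer leaks */
    return 0;
}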
4 changes: 4 additions & 0 deletions src/shogun/kernel/CustomKernel.cpp
@@ -57,12 +57,14 @@ CCustomKernel::init()
 CCustomKernel::CCustomKernel()
 : CKernel(10), kmatrix(), upper_diagonal(false)
 {
+    SG_DEBUG("created CCustomKernel\n");
     init();
 }
 
 CCustomKernel::CCustomKernel(CKernel* k)
 : CKernel(10)
 {
+    SG_DEBUG("created CCustomKernel\n");
     init();
 
     /* if constructed from a custom kernel, use same kernel matrix */
@@ -79,12 +81,14 @@ CCustomKernel::CCustomKernel(CKernel* k)
 CCustomKernel::CCustomKernel(SGMatrix<float64_t> km)
 : CKernel(10), upper_diagonal(false)
 {
+    SG_DEBUG("created CCustomKernel\n");
     init();
     set_full_kernel_matrix_from_full(km);
 }
 
 CCustomKernel::~CCustomKernel()
 {
+    SG_DEBUG("destroying CCustomKernel\n");
     SG_UNREF(m_row_subset);
     SG_UNREF(m_col_subset);
     cleanup();
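The SG_DEBUG messages added above are silent by default; they only appear once the log level is lowered to MSG_DEBUG. A short sketch of how they might be surfaced, assuming the legacy Shogun API of this era (init_shogun_with_defaults(), the global sg_io, SGIO::set_loglevel() and the two-argument SGMatrix constructor are assumptions, and header paths may differ between versions):

#include <shogun/base/init.h>
#include <shogun/io/SGIO.h>
#include <shogun/kernel/CustomKernel.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    /* debug messages such as "created CCustomKernel" are suppressed by
     * default; lower the global log level to see them */
    sg_io->set_loglevel(MSG_DEBUG);

    /* small precomputed 2x2 kernel matrix */
    SGMatrix<float64_t> km(2, 2);
    km.matrix[0]=1.0; km.matrix[1]=0.5;
    km.matrix[2]=0.5; km.matrix[3]=1.0;

    CCustomKernel* k=new CCustomKernel(km);  /* prints the constructor SG_DEBUG line */
    SG_UNREF(k);                             /* destructor message is printed here */

    exit_shogun();
    return 0;
}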
4 changes: 2 additions & 2 deletions src/shogun/machine/KernelMachine.cpp
@@ -461,6 +461,7 @@ void CKernelMachine::store_model_features()
 
 bool CKernelMachine::train_locked(SGVector<index_t> indices)
 {
+    SG_DEBUG("entering %s::train_locked()\n", get_name());
     if (!is_data_locked())
         SG_ERROR("CKernelMachine::train_locked() call data_lock() before!\n");
 
@@ -485,14 +486,13 @@ bool CKernelMachine::train_locked(SGVector<index_t> indices)
      * and train does data_unlock */
     bool result=train_machine();
 
-    // CMath::display_vector(get_support_vectors().vector, get_num_support_vectors(), "sv indices");
-
     /* set col subset of kernel to contain all elements */
     m_custom_kernel->remove_col_subset();
 
     /* remove label subset after training */
     m_labels->remove_subset();
 
+    SG_DEBUG("leaving %s::train_locked()\n", get_name());
     return result;
 }
 
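train_locked() belongs to the data-locking workflow used, for instance, to speed up cross-validation: the full kernel matrix is precomputed once under data_lock() and each training run then only selects an index subset of it. A rough sketch of that call sequence follows; the data_lock()/data_unlock() signatures and header paths are assumptions, and construction of the concrete machine, features and labels is elided:

#include <shogun/machine/KernelMachine.h>
#include <shogun/lib/SGVector.h>

using namespace shogun;

/* train a kernel machine on several folds of locked data; each entry of
 * 'folds' selects training rows/columns of the precomputed kernel matrix */
void train_all_folds(CKernelMachine* machine, CLabels* labels, CFeatures* features,
        SGVector<index_t>* folds, int32_t num_folds)
{
    /* precompute and store the full kernel matrix once */
    machine->data_lock(labels, features);

    for (int32_t i=0; i<num_folds; i++)
    {
        /* train on an index subset of the precomputed kernel; without the
         * data_lock() call above this triggers the SG_ERROR seen in the diff */
        machine->train_locked(folds[i]);
    }

    /* release the precomputed kernel again */
    machine->data_unlock();
}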
