
Commit

PTHREAD ifdefing
lisitsyn committed Jan 31, 2012
1 parent ff2eba8 commit c36dfbc
Showing 11 changed files with 25 additions and 21 deletions.
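
The changes below all follow one pattern: code that spawns pthreads is compiled only when HAVE_PTHREAD is defined, and each call site keeps a serial fallback. What follows is a minimal, self-contained sketch of that pattern, not code from the commit; the names THREAD_PARAM, run_thread and compute_all are hypothetical, and plain new/delete stands in for Shogun's SG_MALLOC/SG_FREE.

#include <cstddef>
#ifdef HAVE_PTHREAD
#include <pthread.h>
#endif

struct THREAD_PARAM
{
    int idx_start;   // first index handled by this worker
    int idx_step;    // stride between indices (== number of workers)
    int idx_stop;    // one past the last index
    double* data;    // shared output buffer
};

static void* run_thread(void* p)
{
    THREAD_PARAM* param = static_cast<THREAD_PARAM*>(p);
    for (int i = param->idx_start; i < param->idx_stop; i += param->idx_step)
        param->data[i] = 2.0 * i;   // placeholder per-element work
    return NULL;
}

void compute_all(double* data, int N, int num_threads)
{
#ifdef HAVE_PTHREAD
    pthread_t* threads = new pthread_t[num_threads - 1];
    THREAD_PARAM* params = new THREAD_PARAM[num_threads];
    for (int t = 0; t < num_threads - 1; t++)
    {
        params[t].idx_start = t;
        params[t].idx_step = num_threads;
        params[t].idx_stop = N;
        params[t].data = data;
        pthread_create(&threads[t], NULL, run_thread, (void*)&params[t]);
    }
    // the calling thread does its own share of the work instead of idling
    params[num_threads - 1].idx_start = num_threads - 1;
    params[num_threads - 1].idx_step = num_threads;
    params[num_threads - 1].idx_stop = N;
    params[num_threads - 1].data = data;
    run_thread((void*)&params[num_threads - 1]);
    for (int t = 0; t < num_threads - 1; t++)
        pthread_join(threads[t], NULL);
    delete[] params;
    delete[] threads;
#else
    // serial fallback: one pass over the whole range
    THREAD_PARAM single_thread_param;
    single_thread_param.idx_start = 0;
    single_thread_param.idx_step = 1;
    single_thread_param.idx_stop = N;
    single_thread_param.data = data;
    run_thread((void*)&single_thread_param);
#endif /* HAVE_PTHREAD */
}
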
2 changes: 2 additions & 0 deletions src/shogun/classifier/svm/SVM_libsvm.cpp
@@ -284,6 +284,7 @@ class LibSVMKernel: public QMatrix {
}
else
{
#ifdef HAVE_PTHREAD
int32_t total_num=(len-start);
pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
Q_THREAD_PARAM* params = SG_MALLOC(Q_THREAD_PARAM, num_threads);
@@ -329,6 +330,7 @@ class LibSVMKernel: public QMatrix {

SG_FREE(params);
SG_FREE(threads);
#endif /* HAVE_PTHREAD */
}
}

6 changes: 4 additions & 2 deletions src/shogun/classifier/svm/WDSVMOcas.cpp
@@ -369,6 +369,7 @@ int CWDSVMOcas::add_new_cut(
step=1;
}

#ifdef HAVE_PTHREAD
for (t=0; t<nthreads; t++)
{
params_add[t].wdocas=o;
@@ -407,7 +408,7 @@ int CWDSVMOcas::add_new_cut(
// new_a[i]+=a[i];
//SG_FREE(a);
}

#endif /* HAVE_PTHREAD */
for(i=0; i < cut_length; i++)
{
if (o->use_bias)
@@ -552,7 +553,7 @@ int CWDSVMOcas::compute_output( float64_t *output, void* ptr )
nthreads=nData-1;
step=1;
}

#ifdef HAVE_PTHREAD
for (t=0; t<nthreads; t++)
{
params_output[t].wdocas=o;
@@ -589,6 +590,7 @@ int CWDSVMOcas::compute_output( float64_t *output, void* ptr )
SG_FREE(params_output);
SG_FREE(val);
SG_FREE(out);
#endif /* HAVE_PTHREAD */
return 0;
}
/*----------------------------------------------------------------------
5 changes: 2 additions & 3 deletions src/shogun/converter/HessianLocallyLinearEmbedding.cpp
@@ -87,8 +87,8 @@ SGMatrix<float64_t> CHessianLocallyLinearEmbedding::construct_weight_matrix(CSim
int32_t dp = m_target_dim*(m_target_dim+1)/2;
if (m_k<(1+m_target_dim+dp))
SG_ERROR("K parameter should have value greater than 1+target dimensionality+dp.\n");
int32_t t;
#ifdef HAVE_PTHREAD
int32_t t;
int32_t num_threads = parallel->get_num_threads();
ASSERT(num_threads>0);
// allocate threads and params
@@ -146,11 +146,10 @@ SGMatrix<float64_t> CHessianLocallyLinearEmbedding::construct_weight_matrix(CSim
SG_FREE(threads);
#else
HESSIANESTIMATION_THREAD_PARAM single_thread_param;
single_thread_param.idx_start = t;
single_thread_param.idx_start = 0;
single_thread_param.idx_step = num_threads;
single_thread_param.idx_stop = N;
single_thread_param.m_k = m_k;
single_thread_param.dim = dim;
single_thread_param.neighborhood_matrix = neighborhood_matrix;
single_thread_param.feature_matrix = feature_matrix;
single_thread_param.local_feature_matrix = local_feature_matrix;
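
Besides adding the guard, this hunk moves the declaration of t inside the pthread branch and starts the serial fallback at index 0 instead of at t, which is never assigned in a build without pthreads. A reduced, hypothetical sketch of that hazard and the fix (names and the surrounding function are not from the file):

struct ESTIMATION_THREAD_PARAM
{
    int idx_start;
    int idx_step;
    int idx_stop;
};

void construct_weights_sketch(int N, int num_threads)
{
#ifdef HAVE_PTHREAD
    int t;   // loop counter lives only where it is actually used
    for (t = 0; t < num_threads - 1; t++)
    {
        // fill per-thread params and pthread_create() a worker
    }
#else
    ESTIMATION_THREAD_PARAM single_thread_param;
    single_thread_param.idx_start = 0;   // was "= t", which is uninitialized here
    single_thread_param.idx_step = 1;
    single_thread_param.idx_stop = N;
    // run the estimation routine directly with &single_thread_param
    (void)single_thread_param;
#endif /* HAVE_PTHREAD */
}
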
6 changes: 3 additions & 3 deletions src/shogun/converter/Isomap.cpp
@@ -118,7 +118,7 @@ SGMatrix<float64_t> CIsomap::process_distance_matrix(SGMatrix<float64_t> distanc

SGMatrix<float64_t> CIsomap::isomap_distance(SGMatrix<float64_t> D_matrix)
{
int32_t N,t,i;
int32_t N,i;
N = D_matrix.num_cols;
if (D_matrix.num_cols!=D_matrix.num_rows)
{
@@ -156,7 +156,7 @@ SGMatrix<float64_t> CIsomap::isomap_distance(SGMatrix<float64_t> D_matrix)
delete coverTree;

#ifdef HAVE_PTHREAD

int32_t t;
// Parallel Dijkstra with Fibonacci Heap
int32_t num_threads = parallel->get_num_threads();
ASSERT(num_threads>0);
@@ -207,7 +207,7 @@ SGMatrix<float64_t> CIsomap::isomap_distance(SGMatrix<float64_t> D_matrix)
SG_FREE(parameters);
SG_FREE(threads);
#else
D_THREAD_PARAM single_thread_param;
DIJKSTRA_THREAD_PARAM single_thread_param;
single_thread_param.i_start = 0;
single_thread_param.i_stop = N;
single_thread_param.i_step = 1;
2 changes: 1 addition & 1 deletion src/shogun/converter/KernelLocalTangentSpaceAlignment.cpp
@@ -121,7 +121,7 @@ SGMatrix<float64_t> CKernelLocalTangentSpaceAlignment::construct_weight_matrix(S
SG_FREE(parameters);
SG_FREE(threads);
#else
KLTSA_THREAD_PARAM single_thread_param = {0,1,N,m_k,m_target_dim,neighborhood_matrix.matrix,
KLTSA_THREAD_PARAM single_thread_param = {0,1,N,m_k,m_target_dim,N,neighborhood_matrix.matrix,
kernel_matrix.matrix,local_gram_matrix,ev_vector,
G_matrix,W_matrix};
run_kltsa_thread((void*)&single_thread_param);
4 changes: 2 additions & 2 deletions src/shogun/converter/KernelLocallyLinearEmbedding.cpp
@@ -154,8 +154,8 @@ SGMatrix<float64_t> CKernelLocallyLinearEmbedding::construct_weight_matrix(SGMat
{
int32_t N = kernel_matrix.num_cols;
// loop variables
int32_t t;
#ifdef HAVE_PTHREAD
int32_t t;
int32_t num_threads = parallel->get_num_threads();
ASSERT(num_threads>0);
// allocate threads
@@ -205,7 +205,7 @@ SGMatrix<float64_t> CKernelLocallyLinearEmbedding::construct_weight_matrix(SGMat
single_thread_param.kernel_matrix = kernel_matrix.matrix;
single_thread_param.id_vector = id_vector;
single_thread_param.W_matrix = W_matrix;
run_linearreconstruction_thread((void*)single_thread_param);
run_linearreconstruction_thread((void*)&single_thread_param);
#endif

// clean
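
The single-thread fallback above also gains an & in the call: run_linearreconstruction_thread, like any pthread-style entry point, takes a void*, so the caller must pass the address of the parameter struct; casting a plain parameter struct itself to void* is not valid C++. A reduced, self-contained sketch (the struct layout here is hypothetical):

#include <cstdio>

struct RECONSTRUCTION_THREAD_PARAM
{
    int idx_start;
    int idx_stop;
};

static void* run_linearreconstruction_thread(void* p)
{
    RECONSTRUCTION_THREAD_PARAM* param =
        static_cast<RECONSTRUCTION_THREAD_PARAM*>(p);
    std::printf("reconstructing rows %d..%d\n", param->idx_start, param->idx_stop);
    return NULL;
}

int main()
{
    RECONSTRUCTION_THREAD_PARAM single_thread_param = {0, 100};
    run_linearreconstruction_thread((void*)&single_thread_param);   // correct: pass the address
    // run_linearreconstruction_thread((void*)single_thread_param); // error: no conversion from struct to void*
    return 0;
}
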
4 changes: 2 additions & 2 deletions src/shogun/converter/LocallyLinearEmbedding.cpp
@@ -337,8 +337,8 @@ SGMatrix<float64_t> CLocallyLinearEmbedding::construct_weight_matrix(CSimpleFeat
{
int32_t N = simple_features->get_num_vectors();
int32_t dim = simple_features->get_num_features();
int32_t t;
#ifdef HAVE_PTHREAD
int32_t t;
int32_t num_threads = parallel->get_num_threads();
ASSERT(num_threads>0);
// allocate threads
@@ -392,7 +392,7 @@ SGMatrix<float64_t> CLocallyLinearEmbedding::construct_weight_matrix(CSimpleFeat
single_thread_param.id_vector = id_vector;
single_thread_param.W_matrix = W_matrix;
single_thread_param.m_reconstruction_shift = m_reconstruction_shift;
run_linearreconstruction_thread((void*)single_thread_param);
run_linearreconstruction_thread((void*)&single_thread_param);
#endif

// clean
3 changes: 2 additions & 1 deletion src/shogun/converter/MultidimensionalScaling.cpp
@@ -272,7 +272,7 @@ SGMatrix<float64_t> CMultidimensionalScaling::landmark_embedding(SGMatrix<float6
ASSERT(m_target_dim>0);
ASSERT(distance_matrix.num_cols==distance_matrix.num_rows);
int32_t lmk_N = m_landmark_number;
int32_t i,j,t;
int32_t i,j;
int32_t total_N = distance_matrix.num_cols;
if (lmk_N<3)
{
@@ -340,6 +340,7 @@ SGMatrix<float64_t> CMultidimensionalScaling::landmark_embedding(SGMatrix<float6

// get embedding for non-landmark vectors
#ifdef HAVE_PTHREAD
int32_t t;
int32_t num_threads = parallel->get_num_threads();
ASSERT(num_threads>0);
// allocate threads and it's parameters
4 changes: 2 additions & 2 deletions src/shogun/io/InputParser.h
@@ -502,7 +502,7 @@ template <class T> void* CInputParser<T>::main_parse_loop(void* params)
{
// Read the examples into current_* objects
// Instead of allocating mem for new objects each time

#ifdef HAVE_PTHREAD
CInputParser* this_obj = (CInputParser *) params;
this->input_source = this_obj->input_source;

@@ -548,7 +548,7 @@ template <class T> void* CInputParser<T>::main_parse_loop(void* params)
pthread_cond_signal(&examples_state_changed);
pthread_mutex_unlock(&examples_state_lock);
}

#endif /* HAVE_PTHREAD */
return NULL;
}

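
In InputParser.h the whole body of main_parse_loop is guarded rather than just one branch, since the loop is built around pthread mutexes and condition variables (examples_state_lock, examples_state_changed); without HAVE_PTHREAD the function reduces to returning NULL. A reduced, hypothetical sketch of that shape:

#include <cstddef>

void* main_parse_loop_sketch(void* params)
{
#ifdef HAVE_PTHREAD
    // parse examples from the input source; coordinate with the consumer via
    // examples_state_lock / examples_state_changed (mutex + condition variable)
#endif /* HAVE_PTHREAD */
    (void)params;   // unused in a build without pthreads
    return NULL;
}
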
8 changes: 4 additions & 4 deletions src/shogun/kernel/CombinedKernel.cpp
@@ -428,7 +428,7 @@ void CCombinedKernel::emulate_compute_batch(
params.vec_idx = vec_idx;
compute_optimized_kernel_helper((void*) &params);
}
#ifndef WIN32
#ifdef HAVE_PTHREAD
else
{
pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
@@ -460,7 +460,7 @@ void CCombinedKernel::emulate_compute_batch(
SG_FREE(params);
SG_FREE(threads);
}
#endif
#endif /* HAVE_PTHREAD */

k->delete_optimization();
}
@@ -488,7 +488,7 @@ void CCombinedKernel::emulate_compute_batch(
params.num_suppvec = num_suppvec;
compute_kernel_helper((void*) &params);
}
#ifndef WIN32
#ifdef HAVE_PTHREAD
else
{
pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
@@ -526,7 +526,7 @@ void CCombinedKernel::emulate_compute_batch(
SG_FREE(params);
SG_FREE(threads);
}
#endif
#endif /* HAVE_PTHREAD */
}
}
}
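
In CombinedKernel.cpp the guard changes from #ifndef WIN32 to #ifdef HAVE_PTHREAD: the test is now for the feature rather than the platform, so a build configured without pthreads no longer tries to compile the pthread branch merely because it is not on Windows. A reduced, hypothetical sketch of the call-site shape:

void emulate_compute_batch_sketch(int num_threads)
{
    if (num_threads < 2)
    {
        // serial path: call the helper once for the whole index range
    }
#ifdef HAVE_PTHREAD              // previously guarded with #ifndef WIN32
    else
    {
        // parallel path: SG_MALLOC worker threads, pthread_create() each,
        // let the calling thread take the last chunk, then join and SG_FREE
    }
#endif /* HAVE_PTHREAD */
}
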
2 changes: 1 addition & 1 deletion src/shogun/kernel/WeightedDegreeStringKernel.cpp
@@ -907,7 +907,7 @@ void CWeightedDegreeStringKernel::compute_batch(
SG_PROGRESS(j,0,num_feat);
}
}
#ifndef WIN32
#ifdef HAVE_PTHREAD
else
{
CSignal::clear_cancel();
