Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
Revert "Improved multithreading codestyle, fixes some structure"
This reverts commit 51eeb3d.
  • Loading branch information
lisitsyn committed Oct 2, 2011
1 parent 51eeb3d commit 15ebd97
Show file tree
Hide file tree
Showing 7 changed files with 274 additions and 61 deletions.
48 changes: 40 additions & 8 deletions src/shogun/preprocessor/HessianLocallyLinearEmbedding.cpp
Expand Up @@ -141,11 +141,26 @@ SGMatrix<float64_t> CHessianLocallyLinearEmbedding::construct_weight_matrix(CSim

for (t=0; t<num_threads; t++)
{
parameters[t] = (HESSIANESTIMATION_THREAD_PARAM){t,num_threads,N,m_k,dim,N,dp,target_dim,
neighborhood_matrix.matrix,feature_matrix.matrix,
local_feature_matrix+(m_k*dim)*t,Yi_matrix+(m_k*(1+target_dim+dp))*t,
mean_vector+dim*t,s_values_vector+dim*t,tau+tau_len*t,tau_len,w_sum_vector+dp*t,
q_matrix+(m_k*m_k)*t,W_matrix,&W_matrix_lock};
parameters[t].idx_start = t;
parameters[t].idx_step = num_threads;
parameters[t].idx_stop = N;
parameters[t].m_k = m_k;
parameters[t].dim = dim;
parameters[t].target_dim = target_dim;
parameters[t].N = N;
parameters[t].dp = dp;
parameters[t].neighborhood_matrix = neighborhood_matrix.matrix;
parameters[t].feature_matrix = feature_matrix.matrix;
parameters[t].local_feature_matrix = local_feature_matrix + (m_k*dim)*t;
parameters[t].Yi_matrix = Yi_matrix + (m_k*(1+target_dim+dp))*t;
parameters[t].mean_vector = mean_vector + dim*t;
parameters[t].s_values_vector = s_values_vector + dim*t;
parameters[t].tau = tau+tau_len*t;
parameters[t].tau_len = tau_len;
parameters[t].w_sum_vector = w_sum_vector + dp*t;
parameters[t].q_matrix = q_matrix + (m_k*m_k)*t;
parameters[t].W_matrix = W_matrix;
parameters[t].W_matrix_lock = &W_matrix_lock;
pthread_create(&threads[t], &attr, run_hessianestimation_thread, (void*)&parameters[t]);
}
for (t=0; t<num_threads; t++)
Expand All @@ -154,9 +169,26 @@ SGMatrix<float64_t> CHessianLocallyLinearEmbedding::construct_weight_matrix(CSim
SG_FREE(parameters);
SG_FREE(threads);
#else
HESSIANESTIMATION_THREAD_PARAM single_thread_param = {t,num_threads,N,m_k,dim,N,dp,target_dim,neighborhood_matrix.matrix,
feature_matrix.matrix,local_feature_matrix,Yu_matrix,mean_vector,
s_values_vector,tau,tau_len,w_sum_vector,q_matrix,W_matrix};
HESSIANESTIMATION_THREAD_PARAM single_thread_param;
single_thread_param.idx_start = t;
single_thread_param.idx_step = num_threads;
single_thread_param.idx_stop = N;
single_thread_param.m_k = m_k;
single_thread_param.dim = dim;
single_thread_param.target_dim = target_dim;
single_thread_param.N = N;
single_thread_param.dp = dp;
single_thread_param.neighborhood_matrix = neighborhood_matrix.matrix;
single_thread_param.feature_matrix = feature_matrix.matrix;
single_thread_param.local_feature_matrix = local_feature_matrix;
single_thread_param.Yi_matrix = Yi_matrix;
single_thread_param.mean_vector = mean_vector;
single_thread_param.s_values_vector = s_values_vector;
single_thread_param.tau = tau;
single_thread_param.tau_len = tau_len;
single_thread_param.w_sum_vector = w_sum_vector;
single_thread_param.q_matrix = q_matrix;
single_thread_param.W_matrix = W_matrix;
run_hessianestimation_thread((void*)&single_thread_param);
#endif

Expand Down
30 changes: 24 additions & 6 deletions src/shogun/preprocessor/Isomap.cpp
Expand Up @@ -202,7 +202,7 @@ SGMatrix<float64_t> CIsomap::isomap_distance(SGMatrix<float64_t> D_matrix)
heaps[t] = new CFibonacciHeap(N);

#else
int32_t num_threads = 1;
int32_t num_threads = 1;
#endif

// allocate (s)olution
Expand All @@ -219,7 +219,16 @@ SGMatrix<float64_t> CIsomap::isomap_distance(SGMatrix<float64_t> D_matrix)
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
for (t=0; t<num_threads; t++)
{
parameters[t] = (DIJKSTRA_THREAD_PARAM){heaps[t],edges_matrix,edges_idx_matrix,shortest_D,t,N,num_threads,m_k,s+t*N,f+t*N};
parameters[t].i_start = t;
parameters[t].i_stop = N;
parameters[t].i_step = num_threads;
parameters[t].heap = heaps[t];
parameters[t].edges_matrix = edges_matrix;
parameters[t].edges_idx_matrix = edges_idx_matrix;
parameters[t].s = s+t*N;
parameters[t].f = f+t*N;
parameters[t].m_k = m_k;
parameters[t].shortest_D = shortest_D;
pthread_create(&threads[t], &attr, CIsomap::run_dijkstra_thread, (void*)&parameters[t]);
}
for (t=0; t<num_threads; t++)
Expand All @@ -231,11 +240,20 @@ SGMatrix<float64_t> CIsomap::isomap_distance(SGMatrix<float64_t> D_matrix)
SG_FREE(parameters);
SG_FREE(threads);
#else

DIJKSTRA_THREAD_PARAM single_thread_param = {heaps[0],edges_matrix,edges_idx_matrix,shortest_D,0,N,1,m_k,s,f};
D_THREAD_PARAM single_thread_param;
single_thread_param.i_start = 0;
single_thread_param.i_stop = N;
single_thread_param.i_step = 1;
single_thread_param.m_k = m_k;
single_thread_param.heap = new CFibonacciHeap(N);
single_thread_param.edges_matrix = edges_matrix;
single_thread_param.edges_idx_matrix = edges_idx_matrix;
single_thread_param.s = s;
single_thread_param.f = f;
single_thread_param.shortest_D = shortest_D;

run_dijkstra_thread((void*)&single_thread_param);
delete heaps[0];
SG_FREE(heaps);
delete single_thread_param.heap;
#endif
// cleanup
SG_FREE(edges_matrix);
Expand Down
95 changes: 79 additions & 16 deletions src/shogun/preprocessor/KernelLocallyLinearEmbedding.cpp
Expand Up @@ -29,10 +29,10 @@ struct LK_RECONSTRUCTION_THREAD_PARAM
{
/// starting index of loop
int32_t idx_start;
/// end loop index
int32_t idx_stop;
/// step of loop
int32_t idx_step;
/// end loop index
int32_t idx_stop;
/// number of neighbors
int32_t m_k;
/// number of vectors
Expand Down Expand Up @@ -136,7 +136,7 @@ SGMatrix<float64_t> CKernelLocallyLinearEmbedding::apply_to_feature_matrix(CFeat
// get dimensionality and number of vectors of data
bool is_simple = ((features->get_feature_class()==C_SIMPLE) && (features->get_feature_type()==F_DREAL));
int32_t N = features->get_num_vectors();
int32_t target_dim = 0;
int32_t target_dim;
if (is_simple)
target_dim = calculate_effective_target_dim(((CSimpleFeatures<float64_t>*)features)->get_num_features());
else
Expand All @@ -158,7 +158,7 @@ SGMatrix<float64_t> CKernelLocallyLinearEmbedding::apply_to_feature_matrix(CFeat
m_kernel->cleanup();

// init W (weight) matrix
SGMatrix<float64_t> M_matrix = construct_weight_matrix(kernel_matrix,neighborhood_matrix,target_dim);
SGMatrix<float64_t> M_matrix = construct_weight_matrix(kernel_matrix,neighborhood_matrix);
neighborhood_matrix.destroy_matrix();

SGMatrix<float64_t> nullspace = find_null_space(M_matrix,target_dim);
Expand All @@ -179,8 +179,7 @@ SGMatrix<float64_t> CKernelLocallyLinearEmbedding::apply_to_feature_matrix(CFeat
}

SGMatrix<float64_t> CKernelLocallyLinearEmbedding::construct_weight_matrix(SGMatrix<float64_t> kernel_matrix,
SGMatrix<int32_t> neighborhood_matrix,
int32_t target_dim)
SGMatrix<int32_t> neighborhood_matrix)
{
int32_t N = kernel_matrix.num_cols;
// loop variables
Expand All @@ -205,9 +204,16 @@ SGMatrix<float64_t> CKernelLocallyLinearEmbedding::construct_weight_matrix(SGMat
#ifdef HAVE_PTHREAD
for (t=0; t<num_threads; t++)
{
parameters[t] = (LK_RECONSTRUCTION_THREAD_PARAM){t,num_threads,N,m_k,N,neighborhood_matrix.matrix,
local_gram_matrix+(m_k*m_k)*t,kernel_matrix.matrix,
id_vector+m_k*t,W_matrix};
parameters[t].idx_start = t;
parameters[t].idx_step = num_threads;
parameters[t].idx_stop = N;
parameters[t].m_k = m_k;
parameters[t].N = N;
parameters[t].neighborhood_matrix = neighborhood_matrix.matrix;
parameters[t].kernel_matrix = kernel_matrix.matrix;
parameters[t].local_gram_matrix = local_gram_matrix+(m_k*m_k)*t;
parameters[t].id_vector = id_vector+m_k*t;
parameters[t].W_matrix = W_matrix;
pthread_create(&threads[t], &attr, run_linearreconstruction_thread, (void*)&parameters[t]);
}
for (t=0; t<num_threads; t++)
Expand All @@ -216,8 +222,17 @@ SGMatrix<float64_t> CKernelLocallyLinearEmbedding::construct_weight_matrix(SGMat
SG_FREE(parameters);
SG_FREE(threads);
#else
LK_RECONSTRUCTION_THREAD_PARAM single_thread_param = {0,1,N,m_k,N,neighborhood_matrix.matrix,local_gram_matrix,
kernel_matrix.matrix,id_vector,W_matrix};
LK_RECONSTRUCTION_THREAD_PARAM single_thread_param;
single_thread_param.idx_start = 0;
single_thread_param.idx_step = 1;
single_thread_param.idx_stop = N;
single_thread_param.m_k = m_k;
single_thread_param.N = N;
single_thread_param.neighborhood_matrix = neighborhood_matrix.matrix;
single_thread_param.local_gram_matrix = local_gram_matrix;
single_thread_param.kernel_matrix = kernel_matrix.matrix;
single_thread_param.id_vector = id_vector;
single_thread_param.W_matrix = W_matrix;
run_linearreconstruction_thread((void*)single_thread_param);
#endif

Expand Down Expand Up @@ -258,7 +273,13 @@ SGMatrix<float64_t> CKernelLocallyLinearEmbedding::construct_weight_matrix(SGMat

for (t=0; t<num_threads; t++)
{
parameters_[t] = (SPARSEDOT_THREAD_PARAM){t,num_threads,N,N,W_matrix,M_matrix.matrix,nz_idxs};
parameters_[t].idx_start = t;
parameters_[t].idx_step = num_threads;
parameters_[t].idx_stop = N;
parameters_[t].N = N;
parameters_[t].W_matrix = W_matrix;
parameters_[t].M_matrix = M_matrix.matrix;
parameters_[t].nz_idxs = nz_idxs;
pthread_create(&threads[t], &attr_, run_sparsedot_thread, (void*)&parameters_[t]);
}
for (t=0; t<num_threads; t++)
Expand All @@ -267,7 +288,14 @@ SGMatrix<float64_t> CKernelLocallyLinearEmbedding::construct_weight_matrix(SGMat
SG_FREE(parameters_);
SG_FREE(threads);
#else
SPARSEDOT_THREAD_PARAM single_thread_param = {0,1,N,N,W_matrix,M_matrix.matrix,nz_idxs};
SPARSEDOT_THREAD_PARAM single_thread_param;
single_thread_param.idx_start = 0;
single_thread_param.idx_step = 1;
single_thread_param.idx_stop = N;
single_thread_param.N = N;
single_thread_param.W_matrix = W_matrix;
single_thread_param.M_matrix = M_matrix.matrix;
single_thread_param.nz_idxs = nz_idxs;
run_sparsedot_thread((void*)single_thread_param);
#endif
for (i=0; i<N; i++)
Expand All @@ -289,6 +317,22 @@ SGVector<float64_t> CKernelLocallyLinearEmbedding::apply_to_feature_vector(SGVec
return vector;
}

void CKernelLocallyLinearEmbedding::construct_local_gram_matrix(float64_t* local_gram_matrix,
                                                                const float64_t* kernel_matrix,
                                                                const int32_t* neighborhood_matrix,
                                                                int32_t i, int32_t N, int32_t m_k_)
{
	// Fill the m_k_ x m_k_ local Gram matrix for vector i. Entry (p,q) is the
	// double-centered kernel value
	//   k(i,i) - k(i,n_p) - k(i,n_q) + k(n_p,n_q)
	// where n_p and n_q are the p-th and q-th neighbors of i, read column-wise
	// from neighborhood_matrix (neighbor r of vector i is at [r*N+i]).
	const float64_t k_ii = kernel_matrix[i*N+i];
	for (int32_t p=0; p<m_k_; p++)
	{
		// hoist the row-invariant neighbor index and kernel value
		const int32_t nbr_p = neighborhood_matrix[p*N+i];
		const float64_t k_i_p = kernel_matrix[i*N+nbr_p];
		for (int32_t q=0; q<m_k_; q++)
		{
			const int32_t nbr_q = neighborhood_matrix[q*N+i];
			local_gram_matrix[p*m_k_+q] =
				k_ii - k_i_p - kernel_matrix[i*N+nbr_q] + kernel_matrix[nbr_p*N+nbr_q];
		}
	}
}

void* CKernelLocallyLinearEmbedding::run_linearreconstruction_thread(void* p)
{
LK_RECONSTRUCTION_THREAD_PARAM* parameters = (LK_RECONSTRUCTION_THREAD_PARAM*)p;
Expand All @@ -303,11 +347,14 @@ void* CKernelLocallyLinearEmbedding::run_linearreconstruction_thread(void* p)
float64_t* id_vector = parameters->id_vector;
float64_t* W_matrix = parameters->W_matrix;

int32_t i,j,k;
int32_t i,j;
float64_t norming,trace;

for (i=idx_start; i<idx_stop; i+=idx_step)
{
// form local gram matrix
construct_local_gram_matrix(local_gram_matrix,kernel_matrix,neighborhood_matrix,i,N,m_k);
/*
for (j=0; j<m_k; j++)
{
for (k=0; k<m_k; k++)
Expand All @@ -317,6 +364,7 @@ void* CKernelLocallyLinearEmbedding::run_linearreconstruction_thread(void* p)
kernel_matrix[i*N+neighborhood_matrix[k*N+i]] +
kernel_matrix[neighborhood_matrix[j*N+i]*N+neighborhood_matrix[k*N+i]];
}
*/

for (j=0; j<m_k; j++)
id_vector[j] = 1.0;
Expand Down Expand Up @@ -371,7 +419,14 @@ SGMatrix<int32_t> CKernelLocallyLinearEmbedding::get_neighborhood_matrix(SGMatri
#ifdef HAVE_PTHREAD
for (t=0; t<num_threads; t++)
{
parameters[t] = (K_NEIGHBORHOOD_THREAD_PARAM){t,num_threads,N,m_k,N,heaps[t],kernel_matrix.matrix,neighborhood_matrix};
parameters[t].idx_start = t;
parameters[t].idx_step = num_threads;
parameters[t].idx_stop = N;
parameters[t].m_k = m_k;
parameters[t].N = N;
parameters[t].heap = heaps[t];
parameters[t].neighborhood_matrix = neighborhood_matrix;
parameters[t].kernel_matrix = kernel_matrix.matrix;
pthread_create(&threads[t], &attr, run_neighborhood_thread, (void*)&parameters[t]);
}
for (t=0; t<num_threads; t++)
Expand All @@ -380,7 +435,15 @@ SGMatrix<int32_t> CKernelLocallyLinearEmbedding::get_neighborhood_matrix(SGMatri
SG_FREE(threads);
SG_FREE(parameters);
#else
K_NEIGHBORHOOD_THREAD_PARAM single_thread_param = {0,1,N,m_k,N,heaps[0],kernel_matrix.matrix,neighborhood_matrix};
K_NEIGHBORHOOD_THREAD_PARAM single_thread_param;
single_thread_param.idx_start = 0;
single_thread_param.idx_step = 1;
single_thread_param.idx_stop = N;
single_thread_param.m_k = m_k;
single_thread_param.N = N;
single_thread_param.heap = heaps[0]
single_thread_param.neighborhood_matrix = neighborhood_matrix;
single_thread_param.kernel_matrix = kernel_matrix.matrix;
run_neighborhood_thread((void*)&single_thread_param);
#endif

Expand Down
7 changes: 5 additions & 2 deletions src/shogun/preprocessor/KernelLocallyLinearEmbedding.h
Expand Up @@ -79,8 +79,11 @@ class CKernelLocallyLinearEmbedding: public CLocallyLinearEmbedding

/** construct weight matrix */
virtual SGMatrix<float64_t> construct_weight_matrix(SGMatrix<float64_t> kernel_matrix,
SGMatrix<int32_t> neighborhood_matrix,
int32_t target_dim);
SGMatrix<int32_t> neighborhood_matrix);

/** construct local gram matrix */
static void construct_local_gram_matrix(float64_t* local_gram_matrix, const float64_t* kernel_matrix,
const int32_t* neighborhood_matrix, int32_t i, int32_t N, int32_t m_k_);

/** runs neighborhood determination thread
* @param p thread params
Expand Down
39 changes: 32 additions & 7 deletions src/shogun/preprocessor/LocalTangentSpaceAlignment.cpp
Expand Up @@ -128,10 +128,22 @@ SGMatrix<float64_t> CLocalTangentSpaceAlignment::construct_weight_matrix(CSimple

for (t=0; t<num_threads; t++)
{
parameters[t] = (LTSA_THREAD_PARAM){t,num_threads,N,m_k,target_dim,dim,N,neighborhood_matrix.matrix,
G_matrix+(m_k)*(1+target_dim)*t,mean_vector+dim*t,
local_feature_matrix+(m_k*dim)*t,feature_matrix.matrix,
s_values_vector+dim*t,q_matrix+(m_k*m_k)*t,W_matrix,&W_matrix_lock};
parameters[t].idx_start = t;
parameters[t].idx_step = num_threads;
parameters[t].idx_stop = N;
parameters[t].m_k = m_k;
parameters[t].target_dim = target_dim;
parameters[t].dim = dim;
parameters[t].N = N;
parameters[t].neighborhood_matrix = neighborhood_matrix.matrix;
parameters[t].G_matrix = G_matrix + (m_k*(1+target_dim))*t;
parameters[t].mean_vector = mean_vector + dim*t;
parameters[t].local_feature_matrix = local_feature_matrix + (m_k*dim)*t;
parameters[t].feature_matrix = feature_matrix.matrix;
parameters[t].s_values_vector = s_values_vector + dim*t;
parameters[t].q_matrix = q_matrix + (m_k*m_k)*t;
parameters[t].W_matrix = W_matrix;
parameters[t].W_matrix_lock = &W_matrix_lock;
pthread_create(&threads[t], &attr, run_ltsa_thread, (void*)&parameters[t]);
}
for (t=0; t<num_threads; t++)
Expand All @@ -140,9 +152,22 @@ SGMatrix<float64_t> CLocalTangentSpaceAlignment::construct_weight_matrix(CSimple
SG_FREE(parameters);
SG_FREE(threads);
#else
LTSA_THREAD_PARAM single_thread_param = {0,1,N,m_k,target_dim,dim,N,neighborhood_matrix.matrix,
G_matrix,mean_vector,local_feature_matrix,feature_matrix.matrix,
s_values_vector,q_matrix,W_matrix};
LTSA_THREAD_PARAM single_thread_param;
single_thread_param.idx_start = 0;
single_thread_param.idx_step = 1;
single_thread_param.idx_stop = N;
single_thread_param.m_k = m_k;
single_thread_param.target_dim = target_dim;
single_thread_param.dim = dim;
single_thread_param.N = N;
single_thread_param.neighborhood_matrix = neighborhood_matrix.matrix;
single_thread_param.G_matrix = G_matrix;
single_thread_param.mean_vector = mean_vector;
single_thread_param.local_feature_matrix = local_feature_matrix;
single_thread_param.feature_matrix = feature_matrix.matrix;
single_thread_param.s_values_vector = s_values_vector;
single_thread_param.q_matrix = q_matrix;
single_thread_param.W_matrix = W_matrix;
run_ltsa_thread((void*)&single_thread_param);
#endif

Expand Down

0 comments on commit 15ebd97

Please sign in to comment.