Skip to content

Commit

Permalink
Merge pull request #588 from cwidmer/master
Browse files Browse the repository at this point in the history
syntactic sugar for SGSparseVector
  • Loading branch information
lisitsyn committed Jun 19, 2012
2 parents aa9ad6a + b89a58b commit 43ea7ab
Show file tree
Hide file tree
Showing 5 changed files with 79 additions and 10 deletions.
1 change: 1 addition & 0 deletions src/interfaces/octave_modular/swig_typemaps.i
Expand Up @@ -346,6 +346,7 @@ TYPEMAP_STRINGFEATURES_OUT(float64_t, Matrix)
}
else
matrix[i].features=NULL;
matrix[i].create_idx_map();
}
ASSERT(offset==nnz); // fix: `offset=nnz` was an assignment, not a comparison — it clobbered offset and passed whenever nnz!=0
$1 = shogun::SGSparseMatrix<type>(matrix, num_feat, num_vec, true);
Expand Down
1 change: 1 addition & 0 deletions src/interfaces/python_modular/swig_typemaps.i
Expand Up @@ -625,6 +625,7 @@ static bool spmatrix_from_numpy(SGSparseMatrix<type>& sg_matrix, PyObject* obj,
}
sfm[i-1].num_feat_entries=num;
sfm[i-1].features=features;
sfm[i-1].create_idx_map();
}
}

Expand Down
77 changes: 75 additions & 2 deletions src/shogun/lib/SGSparseVector.h
Expand Up @@ -5,6 +5,7 @@
* (at your option) any later version.
*
* Written (W) 2012 Fernando José Iglesias García
* Written (W) 2012 Christian Widmer
* Written (W) 2010,2012 Soeren Sonnenburg
* Copyright (C) 2010 Berlin Institute of Technology
* Copyright (C) 2012 Soeren Sonnenburg
Expand All @@ -15,10 +16,12 @@

#include <shogun/lib/config.h>
#include <shogun/lib/DataType.h>
#include <map>

namespace shogun
{


/** @brief template class SGSparseVectorEntry */
template <class T> struct SGSparseVectorEntry
{
Expand All @@ -40,7 +43,10 @@ template <class T> class SGSparseVector
/** Constructor wrapping an existing array of sparse entries.
 *
 * @param feats array of entries (feat_index / entry pairs); not copied
 * @param num_entries number of entries in feats
 * @param index vector index — appears unused in this snippet; presumably
 *              kept for interface compatibility (TODO confirm)
 * @param free_v if true, this vector owns feats and frees it in free_vector()
 */
SGSparseVector(SGSparseVectorEntry<T>* feats, index_t num_entries,
index_t index, bool free_v=false) :
num_feat_entries(num_entries), features(feats),
do_free(free_v)
{
// build the dense-index -> sparse-position map used by operator[]
create_idx_map();
}

/** constructor to create new vector in memory */
SGSparseVector(index_t num_entries, index_t index, bool free_v=false) :
Expand All @@ -52,14 +58,18 @@ template <class T> class SGSparseVector
/** Copy constructor (shallow).
 *
 * Shares the features array with orig and also copies do_free.
 * NOTE(review): if both instances end up with do_free set, calling
 * free_vector() on each would free the same buffer twice — confirm
 * callers avoid this.
 *
 * @param orig vector to copy
 */
SGSparseVector(const SGSparseVector& orig) :
num_feat_entries(orig.num_feat_entries),
features(orig.features), do_free(orig.do_free)
{
// rebuild the index map over the (shared) entry array
create_idx_map();
}

/** free vector */
void free_vector()
{
if (do_free)
SG_FREE(features);

dense_to_sparse_idx.clear();
features=NULL;
do_free=false;
num_feat_entries=0;
Expand All @@ -72,6 +82,55 @@ template <class T> class SGSparseVector
free_vector();
}

/** create mapping from dense idx to sparse idx */
void create_idx_map()
{
dense_to_sparse_idx.clear();
for (int32_t i=0; i!=num_feat_entries; i++)
{
dense_to_sparse_idx[features[i].feat_index] = i;
}
}

/** Read-only access to the value stored at a dense dimension.
 *
 * @param index dense dimension to look up
 * @return the stored entry for that dimension, or the shared zero
 *         element if the dimension has no sparse entry
 */
inline const T& operator[](index_t index) const
{
	// O(log n): consult the dense->sparse position map
	std::map<index_t, index_t>::const_iterator pos = dense_to_sparse_idx.find(index);

	if (pos == dense_to_sparse_idx.end())
		return zero;

	return features[pos->second].entry;
}


/** TODO: operator overload for vector r/w access
*
* @param index dimension to access
*
inline T& operator[](index_t index)
{
return dense_to_sparse_idx[index];
// lookup complexity is O(log n)
typename std::map<index_t, T>::iterator it = dense_to_sparse_idx.find(index);
if (it != dense_to_sparse_idx.end())
{
return it->second;
} else {
return dense_to_sparse_idx.insert(index, 0);
}
}
*/

public:
/** number of feature entries */
index_t num_feat_entries;
Expand All @@ -81,6 +140,20 @@ template <class T> class SGSparseVector

/** whether vector needs to be freed */
bool do_free;

protected:
/** store mapping of indices */
std::map<index_t, index_t> dense_to_sparse_idx;

/** zero element */
static const T zero;

};

// initialize static member in template class
template <typename T>
const T SGSparseVector<T>::zero = T(0);

}

#endif // __SGSPARSEVECTOR_H__
5 changes: 1 addition & 4 deletions src/shogun/transfer/multitask/LibLinearMTL.cpp
Expand Up @@ -520,14 +520,11 @@ return obj
// look up task similarity
int32_t ti_j = task_indicator_lhs[j];

//TODO: same interface for sparse matrix
/*
float64_t ts = task_similarity_matrix.matrix[ti_i*num_tasks+ti_j];
const float64_t ts = task_similarity_matrix.sparse_matrix[ti_i][ti_j];

// compute objective
obj -= 0.5 * ts * alphas[i] * alphas[j] * ((CBinaryLabels*)m_labels)->get_label(i) *
((CBinaryLabels*)m_labels)->get_label(j) * features->dot(i, features,j);
*/
}
}

Expand Down
5 changes: 1 addition & 4 deletions src/shogun/transfer/multitask/LibLinearMTL.h
Expand Up @@ -192,14 +192,11 @@ class CLibLinearMTL : public CLinearMachine
float64_t* v_s = V.get_column_vector(s);
for (int32_t t=0; t<num_tasks; t++)
{
//TODO: same interface for SparseMatrix!
/*
float64_t sim_ts = task_similarity_matrix.matrix[s*num_tasks+t];
float64_t sim_ts = task_similarity_matrix.sparse_matrix[s][t];
for(int32_t i=0; i<w_size; i++)
{
W.matrix[t*w_size + i] += sim_ts * v_s[i];
}
*/
}
}

Expand Down

0 comments on commit 43ea7ab

Please sign in to comment.