Merge pull request #609 from puffin444/master
Added Kernel Scaling to GPs. Fixed some warnings/math errors.
karlnapf committed Jun 29, 2012
2 parents 2660a96 + c1e2e27 commit 047feef
Showing 11 changed files with 106 additions and 44 deletions.
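
For context on the change itself: the kernel scaling added here multiplies every entry of the kernel matrix by the square of a new scale hyperparameter (m_scale in the diffs below), so the effective covariance and its derivative with respect to the scale s are

\f[
\tilde{K} = s^2 K, \qquad \frac{\partial \tilde{K}}{\partial s} = 2sK,
\f]

with the default s = 1.0 (set in CInferenceMethod::init()) leaving the kernel unchanged.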
32 changes: 18 additions & 14 deletions examples/undocumented/libshogun/regression_gaussian_process.cpp
@@ -92,22 +92,27 @@ int main(int argc, char **argv)
 
     CModelSelectionParameters* root=new CModelSelectionParameters();
 
-    CModelSelectionParameters* c2=new CModelSelectionParameters("inference_method", inf);
-    root->append_child(c2);
+    CModelSelectionParameters* c1=new CModelSelectionParameters("inference_method", inf);
+    root->append_child(c1);
+
+    CModelSelectionParameters* c2=new CModelSelectionParameters("scale");
+    c1->append_child(c2);
+    c2->build_values(0.001, 4.0, R_LINEAR);
 
     CModelSelectionParameters* c3=new CModelSelectionParameters("likelihood_model", lik);
-    c2->append_child(c3);
+    c1->append_child(c3);
 
-    CModelSelectionParameters* c1=new CModelSelectionParameters("sigma");
-    c3->append_child(c1);
-    c1->build_values(-10.0, 2.0, R_EXP);
+    CModelSelectionParameters* c4=new CModelSelectionParameters("sigma");
+    c3->append_child(c4);
+    c4->build_values(0.001, 4.0, R_LINEAR);
 
-    CModelSelectionParameters* c4=new CModelSelectionParameters("kernel", test_kernel);
-    c2->append_child(c4);
+    CModelSelectionParameters* c5=new CModelSelectionParameters("kernel", test_kernel);
+    c1->append_child(c5);
 
-    CModelSelectionParameters* c5=new CModelSelectionParameters("width");
-    c4->append_child(c5);
-    c5->build_values(-10.0, 2.0, R_EXP);
+    CModelSelectionParameters* c6=new CModelSelectionParameters("width");
+    c5->append_child(c6);
+    c6->build_values(0.01, 4.0, R_LINEAR);
 
     /* cross validation class for evaluation in model selection */
     SG_REF(gp);
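
(Reading the renumbered tree: root holds inference_method (c1); under it sit the new scale (c2), likelihood_model (c3) with sigma (c4) beneath it, and kernel (c5) with width (c6) beneath it. All build_values() ranges also move from R_EXP to R_LINEAR, so scale, sigma and width are now searched on linear grids.)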
@@ -147,13 +152,12 @@ int main(int argc, char **argv)
 
     result->print_result();
 
-
     SGVector<float64_t> alpha = inf->get_alpha();
     SGVector<float64_t> labe = labels->get_labels();
     SGVector<float64_t> diagonal = inf->get_diagonal_vector();
     SGMatrix<float64_t> cholesky = inf->get_cholesky();
-    SGVector<float64_t> covariance = gp->getCovarianceVector(features2);
-    CRegressionLabels* predictions = gp->apply_regression(features2);
+    SGVector<float64_t> covariance = gp->getCovarianceVector(features);
+    CRegressionLabels* predictions = gp->apply_regression(features);
 
     SGVector<float64_t>::display_vector(alpha.vector, alpha.vlen, "Alpha Vector");
     SGVector<float64_t>::display_vector(labe.vector, labe.vlen, "Labels");
2 changes: 1 addition & 1 deletion src/shogun/evaluation/DifferentiableFunction.h
@@ -39,7 +39,7 @@ class CDifferentiableFunction: public CSGObject
     * @return Map of gradient. Keys are names of parameters, values are
     * values of derivative with respect to that parameter.
     */
-    virtual CMap<SGString<char>, float64_t> get_gradient() = 0;
+    virtual CMap<SGString<const char>, float64_t> get_gradient() = 0;
 
     /*Get the function value
     *
2 changes: 1 addition & 1 deletion src/shogun/evaluation/GradientResult.h
@@ -57,7 +57,7 @@ class CGradientResult: public CEvaluationResult
     SGVector<float64_t> quantity;
 
     /*Function Gradient*/
-    CMap<SGString<char>, float64_t> gradient;
+    CMap<SGString<const char>, float64_t> gradient;
 
     /** Returns the function value
     * and gradient contained in the object.
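
(The SGString<char> to SGString<const char> switch recurs through the remaining files. The gradient-map keys are built from string literals such as "width" and "sigma", so the const element type is presumably what silences the writable-string warnings mentioned in the commit message.)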
14 changes: 7 additions & 7 deletions src/shogun/modelselection/GradientModelSelection.cpp
@@ -46,10 +46,10 @@ double CGradientModelSelection::nlopt_function(unsigned n,
     /*Set parameter values from x vector*/
     for (unsigned int i = 0; i < n; i++)
     {
-        shogun::CMapNode<shogun::SGString<char>, float64_t>* node =
+        shogun::CMapNode<shogun::SGString<const char>, float64_t>* node =
                 result->gradient.get_node_ptr(i);
 
-        char* name = node->key.string;
+        const char* name = node->key.string;
 
         if (!m_current_combination->set_parameter(name, x[i]))
             SG_SERROR("Parameter %s not found in combination tree.\n",
@@ -72,7 +72,7 @@ double CGradientModelSelection::nlopt_function(unsigned n,
     /*Store the gradient into the grad vector*/
     for (unsigned int i = 0; i < n; i++)
     {
-        shogun::CMapNode<shogun::SGString<char>, float64_t>* node =
+        shogun::CMapNode<shogun::SGString<const char>, float64_t>* node =
                 result->gradient.get_node_ptr(i);
         grad[i] = node->data;
     }
@@ -158,23 +158,23 @@ CParameterCombination* CGradientModelSelection::select_model(bool print_state)
     //Set lower bounds for parameters
     for (int i = 0; i < n; i++)
     {
-        CMapNode<SGString<char>, float64_t>* node =
+        CMapNode<SGString<const char>, float64_t>* node =
                 result->gradient.get_node_ptr(i);
 
         TParameter* param =
                 lower_combination->get_parameter(node->key.string);
 
         if (!param)
-            SG_ERROR("Could not find parameter %s"\
-                    "in Parameter Combination", node->key.string);
+            SG_ERROR("Could not find parameter %s "\
+                    "in Parameter Combination\n", node->key.string);
 
         lb[i] = *((float64_t*)(param->m_parameter));
     }
 
     //Update x with initial values
     for (int i = 0; i < n; i++)
     {
-        CMapNode<SGString<char>, float64_t>* node =
+        CMapNode<SGString<const char>, float64_t>* node =
                 result->gradient.get_node_ptr(i);
 
         TParameter* param =
11 changes: 7 additions & 4 deletions src/shogun/modelselection/ParameterCombination.cpp
@@ -52,7 +52,8 @@ void CParameterCombination::append_child(CParameterCombination* child)
     m_child_nodes->append_element(child);
 }
 
-bool CParameterCombination::set_parameter(char* name, float64_t value)
+bool CParameterCombination::set_parameter(const char* name,
+        float64_t value)
 {
     if (m_param)
     {
@@ -90,7 +91,8 @@ bool CParameterCombination::set_parameter(char* name, float64_t value)
     return result;
 }
 
-bool CParameterCombination::set_parameter(char* name, int32_t value)
+bool CParameterCombination::set_parameter(const char* name,
+        int32_t value)
 {
     if (m_param)
     {
@@ -128,7 +130,8 @@ bool CParameterCombination::set_parameter(char* name, int32_t value)
     return result;
 }
 
-bool CParameterCombination::set_parameter(char* name, bool value)
+bool CParameterCombination::set_parameter(const char* name,
+        bool value)
 {
     if (m_param)
     {
@@ -166,7 +169,7 @@ bool CParameterCombination::set_parameter(char* name, bool value)
     return result;
 }
 
-TParameter* CParameterCombination::get_parameter(char* name)
+TParameter* CParameterCombination::get_parameter(const char* name)
 {
     if (m_param)
     {
8 changes: 4 additions & 4 deletions src/shogun/modelselection/ParameterCombination.h
@@ -120,7 +120,7 @@ friend class CModelSelectionParameters;
     *
     * @return bool true if value successfully set.
    */
-    bool set_parameter(char* name, float64_t value);
+    bool set_parameter(const char* name, float64_t value);
 
     /** Sets specific parameter to specified value.
     *
@@ -129,7 +129,7 @@ friend class CModelSelectionParameters;
     *
     * @return bool true if value successfully set.
     */
-    bool set_parameter(char* name, int32_t value);
+    bool set_parameter(const char* name, int32_t value);
 
     /** Sets specific parameter to specified value.
     *
@@ -138,14 +138,14 @@ friend class CModelSelectionParameters;
     *
     * @return bool true if value successfully set.
     */
-    bool set_parameter(char* name, bool value);
+    bool set_parameter(const char* name, bool value);
 
     /** Gets specific parameter by name.
     *
     * @param name Name of parameter
     * @return specified parameter
     */
-    TParameter* get_parameter(char* name);
+    TParameter* get_parameter(const char* name);
 
     /** checks whether this node has children
     *
21 changes: 21 additions & 0 deletions src/shogun/regression/GaussianProcessRegression.cpp
@@ -69,6 +69,7 @@ CRegressionLabels* CGaussianProcessRegression::apply_regression(CFeatures* data)
         SG_ERROR("Null data vector!\n");
 
     SGVector<float64_t> m_alpha = m_method->get_alpha();
+    float64_t m_scale = m_method->get_scale();
     CKernel* kernel = m_method->get_kernel();
 
     kernel->cleanup();
@@ -78,6 +79,12 @@ CRegressionLabels* CGaussianProcessRegression::apply_regression(CFeatures* data)
     //K(X_test, X_train)
     SGMatrix<float64_t> kernel_test_matrix = kernel->get_kernel_matrix();
 
+    for (int i = 0; i < kernel_test_matrix.num_rows; i++)
+    {
+        for (int j = 0; j < kernel_test_matrix.num_cols; j++)
+            kernel_test_matrix(i,j) *= (m_scale*m_scale);
+    }
+
     SGVector< float64_t > result_vector(m_labels->get_num_labels());
 
     //Here we multiply K*^t by alpha to receive the mean predictions.
@@ -120,13 +127,21 @@ SGVector<float64_t> CGaussianProcessRegression::getCovarianceVector(
 
     CKernel* kernel = m_method->get_kernel();
 
+    float64_t m_scale = m_method->get_scale();
+
     kernel->cleanup();
 
     kernel->init(m_features, data);
 
     //K(X_test, X_train)
     SGMatrix<float64_t> kernel_test_matrix = kernel->get_kernel_matrix();
 
+    for (int i = 0; i < kernel_test_matrix.num_rows; i++)
+    {
+        for (int j = 0; j < kernel_test_matrix.num_cols; j++)
+            kernel_test_matrix(i,j) *= (m_scale*m_scale);
+    }
+
     for (int i = 0; i < diagonal.vlen; i++)
     {
         for (int j = 0; j < data->get_num_vectors(); j++)
@@ -175,6 +190,12 @@ SGVector<float64_t> CGaussianProcessRegression::getCovarianceVector(
 
     SGMatrix<float64_t> kernel_test_matrix2 = kernel->get_kernel_matrix();
 
+    for (int i = 0; i < kernel_test_matrix2.num_rows; i++)
+    {
+        for (int j = 0; j < kernel_test_matrix2.num_cols; j++)
+            kernel_test_matrix2(i,j) *= (m_scale*m_scale);
+    }
+
     SGVector<float64_t> result(kernel_test_matrix2.num_cols);
 
     //Subtract V from K(Test,Test) to get covariances.
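
All three inserted loops apply the same correction: every kernel matrix built against test features must be rescaled before use, since the code comment above defines the predictive mean as K*^t times alpha. With the scaled kernel this reads

\f[
\mu_* = \tilde{K}(X_*, X)^\top \alpha, \qquad \tilde{K}(\cdot,\cdot) = s^2\,K(\cdot,\cdot),
\f]

and the covariance path rescales both K(X_test, X_train) and K(X_test, X_test) the same way.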
37 changes: 27 additions & 10 deletions src/shogun/regression/gp/ExactInferenceMethod.cpp
@@ -74,7 +74,7 @@ void CExactInferenceMethod::check_members()
     }
 }
 
-CMap<SGString<char>, float64_t> CExactInferenceMethod::
+CMap<SGString<const char>, float64_t> CExactInferenceMethod::
     get_marginal_likelihood_derivatives()
 {
     check_members();
@@ -89,10 +89,9 @@ CMap<SGString<char>, float64_t> CExactInferenceMethod::
     m_kernel->init(m_features, m_features);
 
     //This will be the vector we return
-    CMap<SGString<char>, float64_t> gradient(
-            2+m_mean->m_parameters->get_num_parameters(),
-            2+m_mean->m_parameters->get_num_parameters());
-
+    CMap<SGString<const char>, float64_t> gradient(
+            3+m_mean->m_parameters->get_num_parameters(),
+            3+m_mean->m_parameters->get_num_parameters());
 
     //Get the sigma variable from the likelihood model
     float64_t m_sigma = dynamic_cast<CGaussianLikelihood*>(m_model)->get_sigma();
@@ -151,16 +150,28 @@ CMap<SGString<char>, float64_t> CExactInferenceMethod::
     for (int i = 0; i < Q.num_rows; i++)
     {
         for (int j = 0; j < Q.num_cols; j++)
-            sum += Q(i,j)*deriv(i,j);
+            sum += Q(i,j)*deriv(i,j)*m_scale*m_scale;
     }
 
     sum /= 2.0;
 
-    gradient.add(SGString<char>("width", strlen("width"), true), sum);
+    gradient.add(SGString<const char>("width", strlen("width"), true), sum);
+
+    sum = 0;
+
+    for (int i = 0; i < Q.num_rows; i++)
+    {
+        for (int j = 0; j < Q.num_cols; j++)
+            sum += Q(i,j)*kernel_matrix(i,j)*m_scale*2.0;
+    }
+
+    sum /= 2.0;
+
+    gradient.add(SGString<const char>("scale", strlen("scale"), true), sum);
 
-    sum = m_sigma*m_sigma*Q.trace(Q.matrix, Q.num_rows, Q.num_cols);
+    sum = m_sigma*Q.trace(Q.matrix, Q.num_rows, Q.num_cols);
 
-    gradient.add(SGString<char>("sigma", strlen("sigma"), true), sum);
+    gradient.add(SGString<const char>("sigma", strlen("sigma"), true), sum);
 
     for (int i = 0; i < m_mean->m_parameters->get_num_parameters(); i++)
     {
@@ ... @@
 
         sum = data_means.dot(data_means.vector, m_alpha.vector, m_alpha.vlen);
 
-        gradient.add(SGString<char>(param->m_name,
+        gradient.add(SGString<const char>(param->m_name,
                 strlen(param->m_name), true), sum);
     }
 
@@ -267,6 +278,12 @@ void CExactInferenceMethod::update_alpha_and_chol()
     //K(X, X)
     SGMatrix<float64_t> kernel_matrix = m_kernel->get_kernel_matrix();
 
+    for (int i = 0; i < kernel_matrix.num_rows; i++)
+    {
+        for (int j = 0; j < kernel_matrix.num_cols; j++)
+            kernel_matrix(i,j) *= (m_scale*m_scale);
+    }
+
     //Placeholder Matrices
     SGMatrix<float64_t> temp1(kernel_matrix.num_rows,
             kernel_matrix.num_cols);
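
The new scale derivative and the corrected sigma derivative in the gradient hunks above both follow the standard gradient of the log marginal likelihood, read here against the code: with \f$ Q = \alpha\alpha^\top - \tilde{K}_y^{-1} \f$ and \f$ \tilde{K}_y = s^2 K + \sigma^2 I \f$,

\f[
\frac{\partial \log p(y|X,\theta)}{\partial \theta}
= \frac{1}{2}\,\mathrm{tr}\!\left(Q\,\frac{\partial \tilde{K}_y}{\partial \theta}\right),
\qquad
\frac{\partial \tilde{K}_y}{\partial s} = 2sK,
\qquad
\frac{\partial \tilde{K}_y}{\partial \sigma} = 2\sigma I.
\f]

The scale case gives exactly the inserted loop (the sum of Q(i,j)*K(i,j)*2s, halved), and the sigma case gives sigma*trace(Q); the old sigma^2*trace(Q) is presumably one of the math errors the commit message mentions.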
4 changes: 2 additions & 2 deletions src/shogun/regression/gp/ExactInferenceMethod.h
@@ -79,7 +79,7 @@ class CExactInferenceMethod: public CInferenceMethod
     * -\frac{\partial {log(p(y|X, \theta))}}{\partial \theta}
     * \f]
     */
-    virtual CMap<SGString<char>, float64_t> get_marginal_likelihood_derivatives();
+    virtual CMap<SGString<const char>, float64_t> get_marginal_likelihood_derivatives();
 
     /** get Alpha Matrix
     *
@@ -132,7 +132,7 @@ class CExactInferenceMethod: public CInferenceMethod
     * @return Map of gradient. Keys are names of parameters, values are
     * values of derivative with respect to that parameter.
     */
-    virtual CMap<SGString<char>, float64_t> get_gradient()
+    virtual CMap<SGString<const char>, float64_t> get_gradient()
     {
         return get_marginal_likelihood_derivatives();
     };
2 changes: 2 additions & 0 deletions src/shogun/regression/gp/InferenceMethod.cpp
@@ -47,6 +47,7 @@ CInferenceMethod::~CInferenceMethod() {
 void CInferenceMethod::init()
 {
     SG_ADD((CSGObject**)&m_kernel, "kernel", "Kernel", MS_AVAILABLE);
+    SG_ADD(&m_scale, "scale", "Kernel Scale", MS_AVAILABLE);
     SG_ADD((CSGObject**)&m_model, "likelihood_model", "Likelihood model",
             MS_AVAILABLE);
     SG_ADD((CSGObject**)&m_labels, "labels", "Labels", MS_NOT_AVAILABLE);
@@ -58,4 +59,5 @@ void CInferenceMethod::init()
     m_labels = NULL;
     m_features = NULL;
     m_mean = NULL;
+    m_scale = 1.0;
 }
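
The page header counts 11 changed files but only 10 render above; the missing hunk is the header that declares m_scale and the get_scale() accessor called from GaussianProcessRegression.cpp (presumably src/shogun/regression/gp/InferenceMethod.h). A minimal sketch of what that hunk plausibly contains, in the usual Shogun style; get_scale() is confirmed by its call sites above, while the setter and the comments are assumptions:

    /* Hypothetical sketch of the unshown InferenceMethod.h change,
     * not the commit's verbatim code. */

    /** get kernel scale
     *
     * @return kernel scale s; the effective kernel is s*s*K
     */
    virtual float64_t get_scale() { return m_scale; }

    /** set kernel scale
     *
     * @param s kernel scale
     */
    virtual void set_scale(float64_t s) { m_scale = s; }

    /** kernel scale, registered in init() and defaulted to 1.0 */
    float64_t m_scale;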
