Merge pull request #13718 from lochsh:svm-sigmoid-fix
SVM sigmoid kernel fix (issue #13621) (#13718)

* Added test for sigmoid case for retrieving support vectors
* Undo unhelpful test
* Add test for sigmoid SVM with data that is easily separable into two concentric circles
* Update sigmoid kernel to use tanh(gamma * <x, y> + coef0) instead of -tanh(gamma * <x, y> + coef0)
* Remove unnecessary constraint on coef0
* Cleanup
* Fixing inappropriate use of doubles
* Add f to float literal
* Replace CV_Assert with ASSERT_EQ where appropriate
Parent: 2f5af1bd33
Commit: 418898029c
modules/ml/src/svm.cpp
@@ -200,20 +200,19 @@ public:
     {
         int j;
         calc_non_rbf_base( vcount, var_count, vecs, another, results,
-                          -2*params.gamma, -2*params.coef0 );
+                          2*params.gamma, 2*params.coef0 );
         // TODO: speedup this
         for( j = 0; j < vcount; j++ )
         {
             Qfloat t = results[j];
-            Qfloat e = std::exp(-std::abs(t));
+            Qfloat e = std::exp(std::abs(t));
             if( t > 0 )
-                results[j] = (Qfloat)((1. - e)/(1. + e));
-            else
                 results[j] = (Qfloat)((e - 1.)/(e + 1.));
+            else
+                results[j] = (Qfloat)((1. - e)/(1. + e));
         }
     }

     void calc_rbf( int vcount, int var_count, const float* vecs,
                    const float* another, Qfloat* results )
     {
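With the sign flip, calc_non_rbf_base now leaves t = 2*(gamma*<x, y> + coef0) in results[j], and the branch above evaluates the kernel value tanh(gamma*<x, y> + coef0) = tanh(t/2) through the identity tanh(t/2) = (e^t - 1)/(e^t + 1); only the sign of the accumulated term and the orientation of the two ratios change relative to the old code. The standalone sketch below is not part of the patch (the gamma, coef0 and dot-product values are illustrative); it simply checks that branching on the sign of t with e = exp(|t|) reproduces std::tanh:

// Standalone check: the sign-branched formula above equals tanh(gamma*<x,y> + coef0).
#include <cassert>
#include <cmath>
#include <cstdio>

int main()
{
    const double gamma = 10.0;   // illustrative values, matching the new test below
    const double coef0 = -10.0;
    for (double dot = -3.0; dot <= 3.0; dot += 0.25)
    {
        const double t = 2*(gamma*dot + coef0);        // what calc_non_rbf_base now produces
        const double e = std::exp(std::abs(t));
        const double r = (t > 0) ? (e - 1.)/(e + 1.)   // tanh(t/2) for t > 0
                                 : (1. - e)/(1. + e);  // tanh(t/2) for t <= 0
        assert(std::abs(r - std::tanh(gamma*dot + coef0)) < 1e-9);
    }
    std::printf("sigmoid kernel identity holds\n");
    return 0;
}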
modules/ml/src/svm.cpp
@@ -1310,8 +1309,6 @@ public:

         if( kernelType != SIGMOID && kernelType != POLY )
             params.coef0 = 0;
-        else if( params.coef0 < 0 )
-            CV_Error( CV_StsOutOfRange, "The kernel parameter <coef0> must be positive or zero" );

         if( kernelType != POLY )
             params.degree = 0;
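Removing the range check matters for the sigmoid case: tanh(gamma*<x, y> + coef0) is a legitimate parameterization with a negative coef0, and the new test below trains with coef0 = -10. A minimal configuration sketch, assuming this patch is applied (previously a negative coef0 was rejected by this check with CV_StsOutOfRange):

// Minimal sketch (values illustrative): a sigmoid SVM may now use a negative coef0.
#include <opencv2/ml.hpp>

int main()
{
    cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
    svm->setKernel(cv::ml::SVM::SIGMOID);
    svm->setGamma(10.0);
    svm->setCoef0(-10.0);   // no longer triggers the removed CV_Error
    // train()/trainAuto() with suitable TrainData would proceed from here.
    return 0;
}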
modules/ml/test/test_svmtrainauto.cpp
@@ -88,6 +88,51 @@ void CV_SVMTrainAutoTest::run( int /*start_from*/ )

 TEST(ML_SVM, trainauto) { CV_SVMTrainAutoTest test; test.safe_run(); }

+TEST(ML_SVM, trainauto_sigmoid)
+{
+    const int datasize = 100;
+    cv::Mat samples = cv::Mat::zeros( datasize, 2, CV_32FC1 );
+    cv::Mat responses = cv::Mat::zeros( datasize, 1, CV_32S );
+
+    const float scale_factor = 0.5;
+    const float radius = 2.0;
+
+    // Populate samples with data that can be split into two concentric circles
+    for (int i = 0; i < datasize; i+=2)
+    {
+        const float pi = 3.14159f;
+        const float angle_rads = (i/datasize) * pi;
+        const float x = radius * cos(angle_rads);
+        const float y = radius * sin(angle_rads);
+
+        // Larger circle
+        samples.at<float>( i, 0 ) = x;
+        samples.at<float>( i, 1 ) = y;
+        responses.at<int>( i, 0 ) = 0;
+
+        // Smaller circle
+        samples.at<float>( i + 1, 0 ) = x * scale_factor;
+        samples.at<float>( i + 1, 1 ) = y * scale_factor;
+        responses.at<int>( i + 1, 0 ) = 1;
+    }
+
+    cv::Ptr<TrainData> data = TrainData::create( samples, cv::ml::ROW_SAMPLE, responses );
+    cv::Ptr<SVM> svm = SVM::create();
+    svm->setKernel(SVM::SIGMOID);
+
+    svm->setGamma(10.0);
+    svm->setCoef0(-10.0);
+    svm->trainAuto( data, 10 );  // 10-fold cross validation.
+
+    float test_data0[2] = {radius, radius};
+    cv::Mat test_point0 = cv::Mat( 1, 2, CV_32FC1, test_data0 );
+    ASSERT_EQ(0, svm->predict( test_point0 ));
+
+    float test_data1[2] = {scale_factor * radius, scale_factor * radius};
+    cv::Mat test_point1 = cv::Mat( 1, 2, CV_32FC1, test_data1 );
+    ASSERT_EQ(1, svm->predict( test_point1 ));
+}
+
+
 TEST(ML_SVM, trainAuto_regression_5369)
 {
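Assuming a standard OpenCV test build, the new case can be run on its own through the ml test binary, e.g. opencv_test_ml --gtest_filter=ML_SVM.trainauto_sigmoid.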