From ff549527697af8a66889c5a7d010fe6a768b1076 Mon Sep 17 00:00:00 2001 From: Marina Noskova Date: Mon, 15 Feb 2016 14:35:36 +0300 Subject: [PATCH] Corrected spelling mistakes --- modules/ml/include/opencv2/ml.hpp | 2 +- modules/ml/src/svmsgd.cpp | 23 +++++++++++------------ modules/ml/test/test_svmsgd.cpp | 2 +- samples/cpp/train_svmsgd.cpp | 4 ++-- 4 files changed, 15 insertions(+), 16 deletions(-) diff --git a/modules/ml/include/opencv2/ml.hpp b/modules/ml/include/opencv2/ml.hpp index 8b56f0d798..346df8f15c 100644 --- a/modules/ml/include/opencv2/ml.hpp +++ b/modules/ml/include/opencv2/ml.hpp @@ -1535,7 +1535,7 @@ The margin type may have one of the following values: \ref SOFT_MARGIN or \ref H - You should use \ref HARD_MARGIN type, if you have linearly separable sets. - You should use \ref SOFT_MARGIN type, if you have non-linearly separable sets or sets with outliers. -- In the general case (if you know nothing about linearly separability of your sets), use SOFT_MARGIN. +- In the general case (if you know nothing about linear separability of your sets), use SOFT_MARGIN. The other parameters may be described as follows: - \f$\lambda\f$ parameter is responsible for weights decreasing at each step and for the strength of restrictions on outliers diff --git a/modules/ml/src/svmsgd.cpp b/modules/ml/src/svmsgd.cpp index 3ae051a9dc..77ac2ad67b 100644 --- a/modules/ml/src/svmsgd.cpp +++ b/modules/ml/src/svmsgd.cpp @@ -11,7 +11,7 @@ // For Open Source Computer Vision Library // // Copyright (C) 2000, Intel Corporation, all rights reserved. -// Copyright (C) 2014, Itseez Inc, all rights reserved. +// Copyright (C) 2016, Itseez Inc, all rights reserved. // Third party copyrights are property of their respective owners. 
// // Redistribution and use in source and binary forms, with or without modification, @@ -103,7 +103,7 @@ public: CV_IMPL_PROPERTY_S(cv::TermCriteria, TermCriteria, params.termCrit) private: - void updateWeights(InputArray sample, bool isFirstClass, float gamma, Mat &weights); + void updateWeights(InputArray sample, bool isPositive, float gamma, Mat &weights); std::pair<bool,bool> areClassesEmpty(Mat responses); @@ -111,7 +111,7 @@ private: void readParams( const FileNode &fn ); - static inline bool isFirstClass(float val) { return val > 0; } + static inline bool isPositive(float val) { return val > 0; } static void normalizeSamples(Mat &matrix, Mat &average, float &multiplier); @@ -152,7 +152,7 @@ std::pair<bool,bool> SVMSGDImpl::areClassesEmpty(Mat responses) for(int index = 0; index < limit_index; index++) { - if (isFirstClass(responses.at<float>(index))) + if (isPositive(responses.at<float>(index))) emptyInClasses.first = false; else emptyInClasses.second = false; @@ -172,7 +172,7 @@ void SVMSGDImpl::normalizeSamples(Mat &samples, Mat &average, float &multiplier) average = Mat(1, featuresCount, samples.type()); for (int featureIndex = 0; featureIndex < featuresCount; featureIndex++) { - Scalar scalAverage = mean(samples.col(featureIndex))[0]; + Scalar scalAverage = mean(samples.col(featureIndex)); average.at<float>(featureIndex) = static_cast<float>(scalAverage[0]); } @@ -190,13 +190,13 @@ void SVMSGDImpl::normalizeSamples(Mat &samples, Mat &average, float &multiplier) void SVMSGDImpl::makeExtendedTrainSamples(const Mat &trainSamples, Mat &extendedTrainSamples, Mat &average, float &multiplier) { - Mat normalisedTrainSamples = trainSamples.clone(); - int samplesCount = normalisedTrainSamples.rows; + Mat normalizedTrainSamples = trainSamples.clone(); + int samplesCount = normalizedTrainSamples.rows; - normalizeSamples(normalisedTrainSamples, average, multiplier); + normalizeSamples(normalizedTrainSamples, average, multiplier); Mat onesCol = Mat::ones(samplesCount, 1, CV_32F); - 
cv::hconcat(normalisedTrainSamples, onesCol, extendedTrainSamples); + cv::hconcat(normalizedTrainSamples, onesCol, extendedTrainSamples); } void SVMSGDImpl::updateWeights(InputArray _sample, bool firstClass, float gamma, Mat& weights) @@ -231,7 +231,7 @@ float SVMSGDImpl::calcShift(InputArray _samples, InputArray _responses) const Mat currentSample = trainSamples.row(samplesIndex); float dotProduct = static_cast<float>(currentSample.dot(weights_)); - bool firstClass = isFirstClass(trainResponses.at<float>(samplesIndex)); + bool firstClass = isPositive(trainResponses.at<float>(samplesIndex)); int index = firstClass ? 0 : 1; float signToMul = firstClass ? 1.f : -1.f; float curDistance = dotProduct * signToMul; @@ -297,11 +297,10 @@ bool SVMSGDImpl::train(const Ptr<TrainData>& data, int) int randomNumber = rng.uniform(0, extendedTrainSamplesCount); //generate sample number Mat currentSample = extendedTrainSamples.row(randomNumber); - bool firstClass = isFirstClass(trainResponses.at<float>(randomNumber)); float gamma = params.gamma0 * std::pow((1 + params.lambda * params.gamma0 * (float)iter), (-params.c)); //update gamma - updateWeights( currentSample, firstClass, gamma, extendedWeights ); + updateWeights( currentSample, isPositive(trainResponses.at<float>(randomNumber)), gamma, extendedWeights ); //average weights (only for ASGD model) if (params.svmsgdType == ASGD) diff --git a/modules/ml/test/test_svmsgd.cpp b/modules/ml/test/test_svmsgd.cpp index 592e66b3b0..00ebbf3341 100644 --- a/modules/ml/test/test_svmsgd.cpp +++ b/modules/ml/test/test_svmsgd.cpp @@ -134,7 +134,7 @@ void CV_SVMSGDTrainTest::makeTestData(Mat weights, float shift) { int testSamplesCount = 100000; int featureCount = weights.cols; - cv::RNG rng(0); + cv::RNG rng(42); testSamples.create(testSamplesCount, featureCount, CV_32FC1); for (int featureIndex = 0; featureIndex < featureCount; featureIndex++) diff --git a/samples/cpp/train_svmsgd.cpp b/samples/cpp/train_svmsgd.cpp index aed8228c4b..a68f613b2f 100644 --- a/samples/cpp/train_svmsgd.cpp 
+++ b/samples/cpp/train_svmsgd.cpp @@ -6,8 +6,6 @@ using namespace cv; using namespace cv::ml; -#define WIDTH 841 -#define HEIGHT 594 struct Data { @@ -17,6 +15,8 @@ struct Data Data() { + const int WIDTH = 841; + const int HEIGHT = 594; img = Mat::zeros(HEIGHT, WIDTH, CV_8UC3); imshow("Train svmsgd", img); }