/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __OPENCV_BACKGROUND_SEGM_HPP__
#define __OPENCV_BACKGROUND_SEGM_HPP__

#include "opencv2/core.hpp"

namespace cv
{

//! @addtogroup video_motion
//! @{

/** @brief Base class for background/foreground segmentation.

The class is only used to define the common interface for the whole family of background/foreground
segmentation algorithms.
 */
class CV_EXPORTS_W BackgroundSubtractor : public Algorithm
{
public:
    /** @brief Computes a foreground mask.

    @param image Next video frame.
    @param fgmask The output foreground mask as an 8-bit binary image.
    @param learningRate The value between 0 and 1 that indicates how fast the background model is
    learnt. A negative value makes the algorithm use an automatically chosen learning rate. 0 means
    that the background model is not updated at all, 1 means that the background model is
    completely reinitialized from the last frame.
     */
    CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) = 0;

    /** @brief Computes a background image.

    @param backgroundImage The output background image.

    @note Sometimes the background image can be very blurry, as it contains the average background
    statistics.
     */
    CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const = 0;
};
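
/* A minimal usage sketch of the BackgroundSubtractor interface (illustrative only, not part of the
   API). The video source name is an assumption; any concrete subtractor created by the factory
   functions declared below is used the same way:

       cv::VideoCapture cap("input.avi");               // hypothetical input video (opencv2/videoio.hpp)
       cv::Ptr<cv::BackgroundSubtractor> subtractor = cv::createBackgroundSubtractorMOG2();
       cv::Mat frame, fgmask, background;
       while (cap.read(frame))
       {
           // learningRate = -1 lets the algorithm choose the rate automatically;
           // 0 freezes the model, 1 rebuilds it from the current frame.
           subtractor->apply(frame, fgmask, -1);
       }
       subtractor->getBackgroundImage(background);      // may look blurry (averaged statistics)
*/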


/** @brief Gaussian Mixture-based Background/Foreground Segmentation Algorithm.

The class implements the Gaussian mixture model background subtraction described in @cite Zivkovic2004
and @cite Zivkovic2006 .
 */
class CV_EXPORTS_W BackgroundSubtractorMOG2 : public BackgroundSubtractor
{
public:
    /** @brief Returns the number of last frames that affect the background model
    */
    CV_WRAP virtual int getHistory() const = 0;
    /** @brief Sets the number of last frames that affect the background model
    */
    CV_WRAP virtual void setHistory(int history) = 0;

    /** @brief Returns the number of Gaussian components in the background model
    */
    CV_WRAP virtual int getNMixtures() const = 0;
    /** @brief Sets the number of Gaussian components in the background model.

    The model needs to be reinitialized to reserve memory.
    */
    CV_WRAP virtual void setNMixtures(int nmixtures) = 0;//needs reinitialization!

    /** @brief Returns the "background ratio" parameter of the algorithm

    If a foreground pixel keeps a semi-constant value for about backgroundRatio\*history frames, it is
    considered background and added to the model as a center of a new component. It corresponds to the
    TB parameter in the paper.
     */
    CV_WRAP virtual double getBackgroundRatio() const = 0;
    /** @brief Sets the "background ratio" parameter of the algorithm
    */
    CV_WRAP virtual void setBackgroundRatio(double ratio) = 0;

    /** @brief Returns the variance threshold for the pixel-model match

    The main threshold on the squared Mahalanobis distance to decide if the sample is well described by
    the background model or not. Related to Cthr from the paper.
     */
    CV_WRAP virtual double getVarThreshold() const = 0;
    /** @brief Sets the variance threshold for the pixel-model match
    */
    CV_WRAP virtual void setVarThreshold(double varThreshold) = 0;

    /** @brief Returns the variance threshold for the pixel-model match used for new mixture component generation

    Threshold for the squared Mahalanobis distance that helps decide when a sample is close to the
    existing components (corresponds to Tg in the paper). If a pixel is not close to any component, it
    is considered foreground or added as a new component. 3 sigma =\> Tg=3\*3=9 is the default. A smaller Tg
    value generates more components. A higher Tg value may result in a small number of components but
    they can grow too large.
     */
    CV_WRAP virtual double getVarThresholdGen() const = 0;
    /** @brief Sets the variance threshold for the pixel-model match used for new mixture component generation
    */
    CV_WRAP virtual void setVarThresholdGen(double varThresholdGen) = 0;

    /** @brief Returns the initial variance of each Gaussian component
    */
    CV_WRAP virtual double getVarInit() const = 0;
    /** @brief Sets the initial variance of each Gaussian component
    */
    CV_WRAP virtual void setVarInit(double varInit) = 0;

    CV_WRAP virtual double getVarMin() const = 0;
    CV_WRAP virtual void setVarMin(double varMin) = 0;

    CV_WRAP virtual double getVarMax() const = 0;
    CV_WRAP virtual void setVarMax(double varMax) = 0;

    /** @brief Returns the complexity reduction threshold

    This parameter defines the number of samples needed to accept that a component actually exists. CT=0.05
    is a default value for all the samples. By setting CT=0 you get an algorithm very similar to the
    standard Stauffer & Grimson algorithm.
     */
    CV_WRAP virtual double getComplexityReductionThreshold() const = 0;
    /** @brief Sets the complexity reduction threshold
    */
    CV_WRAP virtual void setComplexityReductionThreshold(double ct) = 0;

    /** @brief Returns the shadow detection flag

    If true, the algorithm detects shadows and marks them. See createBackgroundSubtractorMOG2 for
    details.
     */
    CV_WRAP virtual bool getDetectShadows() const = 0;
    /** @brief Enables or disables shadow detection
    */
    CV_WRAP virtual void setDetectShadows(bool detectShadows) = 0;

    /** @brief Returns the shadow value

    Shadow value is the value used to mark shadows in the foreground mask. Default value is 127. Value 0
    in the mask always means background, 255 means foreground.
     */
    CV_WRAP virtual int getShadowValue() const = 0;
    /** @brief Sets the shadow value
    */
    CV_WRAP virtual void setShadowValue(int value) = 0;

    /** @brief Returns the shadow threshold

    A shadow is detected if a pixel is a darker version of the background. The shadow threshold (Tau in
    the paper) defines how much darker the shadow can be. Tau = 0.5 means that if a pixel is more than
    twice darker than the background, it is not considered a shadow. See Prati, Mikic, Trivedi and
    Cucchiara, *Detecting Moving Shadows...*, IEEE PAMI, 2003.
     */
    CV_WRAP virtual double getShadowThreshold() const = 0;
    /** @brief Sets the shadow threshold
    */
    CV_WRAP virtual void setShadowThreshold(double threshold) = 0;
};
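
/* An illustrative sketch of tuning a BackgroundSubtractorMOG2 instance through the setters above.
   The chosen values are examples, not recommendations:

       cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::createBackgroundSubtractorMOG2();
       mog2->setHistory(300);           // adapt over roughly the last 300 frames
       mog2->setNMixtures(3);           // fewer Gaussian components; the model is reinitialized
       mog2->setVarThreshold(25.0);     // pixels within this squared Mahalanobis distance match the model
       mog2->setDetectShadows(true);
       mog2->setShadowValue(127);       // shadow pixels are marked with 127 in the foreground mask
*/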

/** @brief Creates MOG2 Background Subtractor

@param history Length of the history.
@param varThreshold Threshold on the squared Mahalanobis distance between the pixel and the model
to decide whether a pixel is well described by the background model. This parameter does not
affect the background update.
@param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
speed a bit, so if you do not need this feature, set the parameter to false.
 */
CV_EXPORTS_W Ptr<BackgroundSubtractorMOG2>
    createBackgroundSubtractorMOG2(int history=500, double varThreshold=16,
                                   bool detectShadows=true);
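
/* An illustrative sketch of creating a MOG2 subtractor and separating shadow pixels (marked with
   the shadow value, 127 by default) from confident foreground (255). The variable `frame` is
   assumed to hold the current video frame, and cv::threshold requires opencv2/imgproc.hpp:

       cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::createBackgroundSubtractorMOG2(500, 16, true);
       cv::Mat fgmask, foregroundOnly;
       mog2->apply(frame, fgmask);
       // keep only values above the shadow value: shadows (127) and background (0) become 0
       cv::threshold(fgmask, foregroundOnly, 200, 255, cv::THRESH_BINARY);
*/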

/** @brief K-nearest neighbours - based Background/Foreground Segmentation Algorithm.

The class implements the K-nearest neighbours background subtraction described in @cite Zivkovic2006 .
Very efficient if the number of foreground pixels is low.
 */
class CV_EXPORTS_W BackgroundSubtractorKNN : public BackgroundSubtractor
{
public:
    /** @brief Returns the number of last frames that affect the background model
    */
    CV_WRAP virtual int getHistory() const = 0;
    /** @brief Sets the number of last frames that affect the background model
    */
    CV_WRAP virtual void setHistory(int history) = 0;

    /** @brief Returns the number of data samples in the background model
    */
    CV_WRAP virtual int getNSamples() const = 0;
    /** @brief Sets the number of data samples in the background model.

    The model needs to be reinitialized to reserve memory.
    */
    CV_WRAP virtual void setNSamples(int _nN) = 0;//needs reinitialization!

    /** @brief Returns the threshold on the squared distance between the pixel and the sample

    The threshold on the squared distance between the pixel and the sample to decide whether a pixel is
    close to a data sample.
     */
    CV_WRAP virtual double getDist2Threshold() const = 0;
    /** @brief Sets the threshold on the squared distance
    */
    CV_WRAP virtual void setDist2Threshold(double _dist2Threshold) = 0;

    /** @brief Returns the number of neighbours, the k in the kNN.

    K is the number of samples that need to be within dist2Threshold in order to decide that the
    pixel matches the kNN background model.
     */
    CV_WRAP virtual int getkNNSamples() const = 0;
    /** @brief Sets the k in the kNN. How many nearest neighbours need to match.
    */
    CV_WRAP virtual void setkNNSamples(int _nkNN) = 0;

    /** @brief Returns the shadow detection flag

    If true, the algorithm detects shadows and marks them. See createBackgroundSubtractorKNN for
    details.
     */
    CV_WRAP virtual bool getDetectShadows() const = 0;
    /** @brief Enables or disables shadow detection
    */
    CV_WRAP virtual void setDetectShadows(bool detectShadows) = 0;

    /** @brief Returns the shadow value

    Shadow value is the value used to mark shadows in the foreground mask. Default value is 127. Value 0
    in the mask always means background, 255 means foreground.
     */
    CV_WRAP virtual int getShadowValue() const = 0;
    /** @brief Sets the shadow value
    */
    CV_WRAP virtual void setShadowValue(int value) = 0;

    /** @brief Returns the shadow threshold

    A shadow is detected if a pixel is a darker version of the background. The shadow threshold (Tau in
    the paper) defines how much darker the shadow can be. Tau = 0.5 means that if a pixel is more than
    twice darker than the background, it is not considered a shadow. See Prati, Mikic, Trivedi and
    Cucchiara, *Detecting Moving Shadows...*, IEEE PAMI, 2003.
     */
    CV_WRAP virtual double getShadowThreshold() const = 0;
    /** @brief Sets the shadow threshold
     */
    CV_WRAP virtual void setShadowThreshold(double threshold) = 0;
};

/** @brief Creates KNN Background Subtractor

@param history Length of the history.
@param dist2Threshold Threshold on the squared distance between the pixel and the sample to decide
whether a pixel is close to that sample. This parameter does not affect the background update.
@param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
speed a bit, so if you do not need this feature, set the parameter to false.
 */
CV_EXPORTS_W Ptr<BackgroundSubtractorKNN>
    createBackgroundSubtractorKNN(int history=500, double dist2Threshold=400.0,
                                   bool detectShadows=true);
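
/* An illustrative sketch of creating and tuning a KNN subtractor; the values are examples and
   `frame` is assumed to hold the current video frame:

       cv::Ptr<cv::BackgroundSubtractorKNN> knn = cv::createBackgroundSubtractorKNN(500, 400.0, true);
       knn->setkNNSamples(2);           // 2 nearest samples must lie within dist2Threshold to match
       knn->setDist2Threshold(600.0);   // larger threshold: a bigger squared distance still counts as close
       cv::Mat fgmask;
       knn->apply(frame, fgmask);
*/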

//! @} video_motion

} // cv

#endif