public class opencv_video
| Modifier and Type | Class and Description |
|---|---|
static class |
opencv_video.BackgroundSubtractor
The Base Class for Background/Foreground Segmentation
The class is only used to define the common interface for
the whole family of background/foreground segmentation algorithms.
|
static class |
opencv_video.BackgroundSubtractorGMG
Background Subtractor module.
|
static class |
opencv_video.BackgroundSubtractorMOG
Gaussian Mixture-based Background/Foreground Segmentation Algorithm
The class implements the following algorithm:
"An improved adaptive background mixture model for real-time tracking with shadow detection"
P. KaewTraKulPong and R. Bowden.
|
static class |
opencv_video.BackgroundSubtractorMOG2
The class implements the following algorithm:
"Improved adaptive Gaussian mixture model for background subtraction"
Z. Zivkovic,
International Conference Pattern Recognition, UK, August, 2004.
|
static class |
opencv_video.CvKalman |
static class |
opencv_video.DenseOpticalFlow |
static class |
opencv_video.KalmanFilter
Kalman filter.
|
opencv_video.AbstractCvKalman |

| Modifier and Type | Field and Description |
|---|---|
static int |
CV_LKFLOW_GET_MIN_EIGENVALS |
static int |
CV_LKFLOW_INITIAL_GUESSES |
static int |
CV_LKFLOW_PYR_A_READY
optical flow
|
static int |
CV_LKFLOW_PYR_B_READY |
static int |
OPTFLOW_FARNEBACK_GAUSSIAN
enum cv::
|
static int |
OPTFLOW_LK_GET_MIN_EIGENVALS
enum cv::
|
static int |
OPTFLOW_USE_INITIAL_FLOW
enum cv::
|
| Constructor and Description |
|---|
opencv_video() |
| Modifier and Type | Method and Description |
|---|---|
static int |
buildOpticalFlowPyramid(opencv_core.Mat img,
opencv_core.MatVector pyramid,
opencv_core.Size winSize,
int maxLevel) |
static int |
buildOpticalFlowPyramid(opencv_core.Mat img,
opencv_core.MatVector pyramid,
opencv_core.Size winSize,
int maxLevel,
boolean withDerivatives,
int pyrBorder,
int derivBorder,
boolean tryReuseInputImage)
constructs a pyramid which can be used as input for calcOpticalFlowPyrLK
|
static double |
calcGlobalOrientation(opencv_core.Mat orientation,
opencv_core.Mat mask,
opencv_core.Mat mhi,
double timestamp,
double duration)
computes the global orientation of the selected motion history image part
|
static void |
calcMotionGradient(opencv_core.Mat mhi,
opencv_core.Mat mask,
opencv_core.Mat orientation,
double delta1,
double delta2) |
static void |
calcMotionGradient(opencv_core.Mat mhi,
opencv_core.Mat mask,
opencv_core.Mat orientation,
double delta1,
double delta2,
int apertureSize)
computes the motion gradient orientation image from the motion history image
|
static void |
calcOpticalFlowFarneback(opencv_core.Mat prev,
opencv_core.Mat next,
opencv_core.Mat flow,
double pyr_scale,
int levels,
int winsize,
int iterations,
int poly_n,
double poly_sigma,
int flags)
computes dense optical flow using Farneback algorithm
|
static void |
calcOpticalFlowPyrLK(opencv_core.Mat prevImg,
opencv_core.Mat nextImg,
opencv_core.Mat prevPts,
opencv_core.Mat nextPts,
opencv_core.Mat status,
opencv_core.Mat err) |
static void |
calcOpticalFlowPyrLK(opencv_core.Mat prevImg,
opencv_core.Mat nextImg,
opencv_core.Mat prevPts,
opencv_core.Mat nextPts,
opencv_core.Mat status,
opencv_core.Mat err,
opencv_core.Size winSize,
int maxLevel,
opencv_core.TermCriteria criteria,
int flags,
double minEigThreshold)
computes sparse optical flow using multi-scale Lucas-Kanade algorithm
|
static void |
calcOpticalFlowSF(opencv_core.Mat from,
opencv_core.Mat to,
opencv_core.Mat flow,
int layers,
int averaging_block_size,
int max_flow)
computes dense optical flow using Simple Flow algorithm
|
static void |
calcOpticalFlowSF(opencv_core.Mat from,
opencv_core.Mat to,
opencv_core.Mat flow,
int layers,
int averaging_block_size,
int max_flow,
double sigma_dist,
double sigma_color,
int postprocess_window,
double sigma_dist_fix,
double sigma_color_fix,
double occ_thr,
int upscale_averaging_radius,
double upscale_sigma_dist,
double upscale_sigma_color,
double speed_up_thr) |
static opencv_core.RotatedRect |
CamShift(opencv_core.Mat probImage,
opencv_core.Rect window,
opencv_core.TermCriteria criteria)
updates the object tracking window using CAMSHIFT algorithm
|
static opencv_video.DenseOpticalFlow |
createOptFlow_DualTVL1() |
static void |
cvCalcAffineFlowPyrLK(opencv_core.CvArr prev,
opencv_core.CvArr curr,
opencv_core.CvArr prev_pyr,
opencv_core.CvArr curr_pyr,
float[] prev_features,
float[] curr_features,
float[] matrices,
int count,
opencv_core.CvSize win_size,
int level,
byte[] status,
float[] track_error,
opencv_core.CvTermCriteria criteria,
int flags) |
static void |
cvCalcAffineFlowPyrLK(opencv_core.CvArr prev,
opencv_core.CvArr curr,
opencv_core.CvArr prev_pyr,
opencv_core.CvArr curr_pyr,
FloatBuffer prev_features,
FloatBuffer curr_features,
FloatBuffer matrices,
int count,
opencv_core.CvSize win_size,
int level,
ByteBuffer status,
FloatBuffer track_error,
opencv_core.CvTermCriteria criteria,
int flags) |
static void |
cvCalcAffineFlowPyrLK(opencv_core.CvArr prev,
opencv_core.CvArr curr,
opencv_core.CvArr prev_pyr,
opencv_core.CvArr curr_pyr,
opencv_core.CvPoint2D32f prev_features,
opencv_core.CvPoint2D32f curr_features,
FloatPointer matrices,
int count,
opencv_core.CvSize win_size,
int level,
BytePointer status,
FloatPointer track_error,
opencv_core.CvTermCriteria criteria,
int flags) |
static double |
cvCalcGlobalOrientation(opencv_core.CvArr orientation,
opencv_core.CvArr mask,
opencv_core.CvArr mhi,
double timestamp,
double duration) |
static void |
cvCalcMotionGradient(opencv_core.CvArr mhi,
opencv_core.CvArr mask,
opencv_core.CvArr orientation,
double delta1,
double delta2) |
static void |
cvCalcMotionGradient(opencv_core.CvArr mhi,
opencv_core.CvArr mask,
opencv_core.CvArr orientation,
double delta1,
double delta2,
int aperture_size) |
static void |
cvCalcOpticalFlowFarneback(opencv_core.CvArr prev,
opencv_core.CvArr next,
opencv_core.CvArr flow,
double pyr_scale,
int levels,
int winsize,
int iterations,
int poly_n,
double poly_sigma,
int flags) |
static void |
cvCalcOpticalFlowPyrLK(opencv_core.CvArr prev,
opencv_core.CvArr curr,
opencv_core.CvArr prev_pyr,
opencv_core.CvArr curr_pyr,
float[] prev_features,
float[] curr_features,
int count,
opencv_core.CvSize win_size,
int level,
byte[] status,
float[] track_error,
opencv_core.CvTermCriteria criteria,
int flags) |
static void |
cvCalcOpticalFlowPyrLK(opencv_core.CvArr prev,
opencv_core.CvArr curr,
opencv_core.CvArr prev_pyr,
opencv_core.CvArr curr_pyr,
FloatBuffer prev_features,
FloatBuffer curr_features,
int count,
opencv_core.CvSize win_size,
int level,
ByteBuffer status,
FloatBuffer track_error,
opencv_core.CvTermCriteria criteria,
int flags) |
static void |
cvCalcOpticalFlowPyrLK(opencv_core.CvArr prev,
opencv_core.CvArr curr,
opencv_core.CvArr prev_pyr,
opencv_core.CvArr curr_pyr,
opencv_core.CvPoint2D32f prev_features,
opencv_core.CvPoint2D32f curr_features,
int count,
opencv_core.CvSize win_size,
int level,
BytePointer status,
FloatPointer track_error,
opencv_core.CvTermCriteria criteria,
int flags) |
static int |
cvCamShift(opencv_core.CvArr prob_image,
opencv_core.CvRect window,
opencv_core.CvTermCriteria criteria,
opencv_imgproc.CvConnectedComp comp) |
static int |
cvCamShift(opencv_core.CvArr prob_image,
opencv_core.CvRect window,
opencv_core.CvTermCriteria criteria,
opencv_imgproc.CvConnectedComp comp,
opencv_core.CvBox2D box)
Tracking
|
static opencv_video.CvKalman |
cvCreateKalman(int dynam_params,
int measure_params) |
static opencv_video.CvKalman |
cvCreateKalman(int dynam_params,
int measure_params,
int control_params) |
static int |
cvEstimateRigidTransform(opencv_core.CvArr A,
opencv_core.CvArr B,
opencv_core.CvMat M,
int full_affine) |
static opencv_core.CvMat |
cvKalmanCorrect(opencv_video.CvKalman kalman,
opencv_core.CvMat measurement) |
static opencv_core.CvMat |
cvKalmanPredict(opencv_video.CvKalman kalman) |
static opencv_core.CvMat |
cvKalmanPredict(opencv_video.CvKalman kalman,
opencv_core.CvMat control) |
static opencv_core.CvMat |
cvKalmanUpdateByMeasurement(opencv_video.CvKalman arg1,
opencv_core.CvMat arg2) |
static opencv_core.CvMat |
cvKalmanUpdateByTime(opencv_video.CvKalman arg1,
opencv_core.CvMat arg2) |
static int |
cvMeanShift(opencv_core.CvArr prob_image,
opencv_core.CvRect window,
opencv_core.CvTermCriteria criteria,
opencv_imgproc.CvConnectedComp comp) |
static void |
cvReleaseKalman(opencv_video.CvKalman kalman) |
static void |
cvReleaseKalman(PointerPointer kalman) |
static opencv_core.CvSeq |
cvSegmentMotion(opencv_core.CvArr mhi,
opencv_core.CvArr seg_mask,
opencv_core.CvMemStorage storage,
double timestamp,
double seg_thresh) |
static void |
cvUpdateMotionHistory(opencv_core.CvArr silhouette,
opencv_core.CvArr mhi,
double timestamp,
double duration)
All the motion template functions work only with single channel images.
|
static opencv_core.Mat |
estimateRigidTransform(opencv_core.Mat src,
opencv_core.Mat dst,
boolean fullAffine)
estimates the best-fit Euclidean, similarity, affine or perspective transformation
that maps one 2D point set to another or one image to another.
|
static boolean |
initModule_video() |
static int |
meanShift(opencv_core.Mat probImage,
opencv_core.Rect window,
opencv_core.TermCriteria criteria)
updates the object tracking window using meanshift algorithm
|
static void |
segmentMotion(opencv_core.Mat mhi,
opencv_core.Mat segmask,
opencv_core.Rect boundingRects,
double timestamp,
double segThresh) |
static void |
updateMotionHistory(opencv_core.Mat silhouette,
opencv_core.Mat mhi,
double timestamp,
double duration)
updates motion history image using the current silhouette
|
public static final int CV_LKFLOW_PYR_A_READY
public static final int CV_LKFLOW_PYR_B_READY
public static final int CV_LKFLOW_INITIAL_GUESSES
public static final int CV_LKFLOW_GET_MIN_EIGENVALS
public static final int OPTFLOW_USE_INITIAL_FLOW
public static final int OPTFLOW_LK_GET_MIN_EIGENVALS
public static final int OPTFLOW_FARNEBACK_GAUSSIAN
public static boolean initModule_video()
public static void cvCalcOpticalFlowPyrLK(opencv_core.CvArr prev, opencv_core.CvArr curr, opencv_core.CvArr prev_pyr, opencv_core.CvArr curr_pyr, opencv_core.CvPoint2D32f prev_features, opencv_core.CvPoint2D32f curr_features, int count, opencv_core.CvSize win_size, int level, BytePointer status, FloatPointer track_error, opencv_core.CvTermCriteria criteria, int flags)
public static void cvCalcOpticalFlowPyrLK(opencv_core.CvArr prev, opencv_core.CvArr curr, opencv_core.CvArr prev_pyr, opencv_core.CvArr curr_pyr, FloatBuffer prev_features, FloatBuffer curr_features, int count, opencv_core.CvSize win_size, int level, ByteBuffer status, FloatBuffer track_error, opencv_core.CvTermCriteria criteria, int flags)
public static void cvCalcOpticalFlowPyrLK(opencv_core.CvArr prev, opencv_core.CvArr curr, opencv_core.CvArr prev_pyr, opencv_core.CvArr curr_pyr, float[] prev_features, float[] curr_features, int count, opencv_core.CvSize win_size, int level, byte[] status, float[] track_error, opencv_core.CvTermCriteria criteria, int flags)
public static void cvCalcAffineFlowPyrLK(opencv_core.CvArr prev, opencv_core.CvArr curr, opencv_core.CvArr prev_pyr, opencv_core.CvArr curr_pyr, opencv_core.CvPoint2D32f prev_features, opencv_core.CvPoint2D32f curr_features, FloatPointer matrices, int count, opencv_core.CvSize win_size, int level, BytePointer status, FloatPointer track_error, opencv_core.CvTermCriteria criteria, int flags)
public static void cvCalcAffineFlowPyrLK(opencv_core.CvArr prev, opencv_core.CvArr curr, opencv_core.CvArr prev_pyr, opencv_core.CvArr curr_pyr, FloatBuffer prev_features, FloatBuffer curr_features, FloatBuffer matrices, int count, opencv_core.CvSize win_size, int level, ByteBuffer status, FloatBuffer track_error, opencv_core.CvTermCriteria criteria, int flags)
public static void cvCalcAffineFlowPyrLK(opencv_core.CvArr prev, opencv_core.CvArr curr, opencv_core.CvArr prev_pyr, opencv_core.CvArr curr_pyr, float[] prev_features, float[] curr_features, float[] matrices, int count, opencv_core.CvSize win_size, int level, byte[] status, float[] track_error, opencv_core.CvTermCriteria criteria, int flags)
public static int cvEstimateRigidTransform(opencv_core.CvArr A, opencv_core.CvArr B, opencv_core.CvMat M, int full_affine)
public static void cvCalcOpticalFlowFarneback(opencv_core.CvArr prev, opencv_core.CvArr next, opencv_core.CvArr flow, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags)
public static void cvUpdateMotionHistory(opencv_core.CvArr silhouette, opencv_core.CvArr mhi, double timestamp, double duration)
public static void cvCalcMotionGradient(opencv_core.CvArr mhi, opencv_core.CvArr mask, opencv_core.CvArr orientation, double delta1, double delta2, int aperture_size)
public static void cvCalcMotionGradient(opencv_core.CvArr mhi, opencv_core.CvArr mask, opencv_core.CvArr orientation, double delta1, double delta2)
public static double cvCalcGlobalOrientation(opencv_core.CvArr orientation, opencv_core.CvArr mask, opencv_core.CvArr mhi, double timestamp, double duration)
public static opencv_core.CvSeq cvSegmentMotion(opencv_core.CvArr mhi, opencv_core.CvArr seg_mask, opencv_core.CvMemStorage storage, double timestamp, double seg_thresh)
public static int cvCamShift(opencv_core.CvArr prob_image, opencv_core.CvRect window, opencv_core.CvTermCriteria criteria, opencv_imgproc.CvConnectedComp comp, opencv_core.CvBox2D box)
public static int cvCamShift(opencv_core.CvArr prob_image, opencv_core.CvRect window, opencv_core.CvTermCriteria criteria, opencv_imgproc.CvConnectedComp comp)
public static int cvMeanShift(opencv_core.CvArr prob_image, opencv_core.CvRect window, opencv_core.CvTermCriteria criteria, opencv_imgproc.CvConnectedComp comp)
public static opencv_video.CvKalman cvCreateKalman(int dynam_params, int measure_params, int control_params)
public static opencv_video.CvKalman cvCreateKalman(int dynam_params, int measure_params)
public static void cvReleaseKalman(PointerPointer kalman)
public static void cvReleaseKalman(opencv_video.CvKalman kalman)
public static opencv_core.CvMat cvKalmanPredict(opencv_video.CvKalman kalman, opencv_core.CvMat control)
public static opencv_core.CvMat cvKalmanPredict(opencv_video.CvKalman kalman)
public static opencv_core.CvMat cvKalmanCorrect(opencv_video.CvKalman kalman, opencv_core.CvMat measurement)
public static opencv_core.CvMat cvKalmanUpdateByTime(opencv_video.CvKalman arg1, opencv_core.CvMat arg2)
public static opencv_core.CvMat cvKalmanUpdateByMeasurement(opencv_video.CvKalman arg1, opencv_core.CvMat arg2)
public static void updateMotionHistory(opencv_core.Mat silhouette, opencv_core.Mat mhi, double timestamp, double duration)
public static void calcMotionGradient(opencv_core.Mat mhi, opencv_core.Mat mask, opencv_core.Mat orientation, double delta1, double delta2, int apertureSize)
public static void calcMotionGradient(opencv_core.Mat mhi, opencv_core.Mat mask, opencv_core.Mat orientation, double delta1, double delta2)
public static double calcGlobalOrientation(opencv_core.Mat orientation, opencv_core.Mat mask, opencv_core.Mat mhi, double timestamp, double duration)
public static void segmentMotion(opencv_core.Mat mhi, opencv_core.Mat segmask, opencv_core.Rect boundingRects, double timestamp, double segThresh)
public static opencv_core.RotatedRect CamShift(opencv_core.Mat probImage, opencv_core.Rect window, opencv_core.TermCriteria criteria)
public static int meanShift(opencv_core.Mat probImage, opencv_core.Rect window, opencv_core.TermCriteria criteria)
public static int buildOpticalFlowPyramid(opencv_core.Mat img, opencv_core.MatVector pyramid, opencv_core.Size winSize, int maxLevel, boolean withDerivatives, int pyrBorder, int derivBorder, boolean tryReuseInputImage)
public static int buildOpticalFlowPyramid(opencv_core.Mat img, opencv_core.MatVector pyramid, opencv_core.Size winSize, int maxLevel)
public static void calcOpticalFlowPyrLK(opencv_core.Mat prevImg, opencv_core.Mat nextImg, opencv_core.Mat prevPts, opencv_core.Mat nextPts, opencv_core.Mat status, opencv_core.Mat err, opencv_core.Size winSize, int maxLevel, opencv_core.TermCriteria criteria, int flags, double minEigThreshold)
public static void calcOpticalFlowPyrLK(opencv_core.Mat prevImg, opencv_core.Mat nextImg, opencv_core.Mat prevPts, opencv_core.Mat nextPts, opencv_core.Mat status, opencv_core.Mat err)
public static void calcOpticalFlowFarneback(opencv_core.Mat prev, opencv_core.Mat next, opencv_core.Mat flow, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags)
public static opencv_core.Mat estimateRigidTransform(opencv_core.Mat src, opencv_core.Mat dst, boolean fullAffine)
public static void calcOpticalFlowSF(opencv_core.Mat from, opencv_core.Mat to, opencv_core.Mat flow, int layers, int averaging_block_size, int max_flow)
public static void calcOpticalFlowSF(opencv_core.Mat from, opencv_core.Mat to, opencv_core.Mat flow, int layers, int averaging_block_size, int max_flow, double sigma_dist, double sigma_color, int postprocess_window, double sigma_dist_fix, double sigma_color_fix, double occ_thr, int upscale_averaging_radius, double upscale_sigma_dist, double upscale_sigma_color, double speed_up_thr)
public static opencv_video.DenseOpticalFlow createOptFlow_DualTVL1()
Copyright © 2014. All Rights Reserved.