| Package | Description |
|---|---|
| org.bytedeco.javacpp | |
| org.bytedeco.javacpp.helper | |
| Modifier and Type | Method and Description |
|---|---|
opencv_core.Mat |
opencv_core.Mat.adjustROI(int dtop,
int dbottom,
int dleft,
int dright)
moves/resizes the current matrix ROI inside the parent matrix.
|
opencv_core.Mat |
opencv_core.Mat.allocator(opencv_core.MatAllocator allocator) |
opencv_core.Mat |
opencv_core.Mat.apply(opencv_core.Range ranges) |
opencv_core.Mat |
opencv_core.Mat.apply(opencv_core.Range rowRange,
opencv_core.Range colRange)
extracts a rectangular sub-matrix
// (this is a generalized form of row, rowRange etc.)
|
opencv_core.Mat |
opencv_core.Mat.apply(opencv_core.Rect roi) |
opencv_core.Mat |
opencv_core.NAryMatIterator.arrays(int i)
the iterated arrays
|
opencv_core.Mat |
opencv_core.PCA.backProject(opencv_core.Mat vec)
reconstructs the original vector from the projection
|
opencv_core.Mat |
opencv_core.Mat.clone()
returns deep copy of the matrix, i.e. the data is copied.
|
opencv_core.Mat |
opencv_contrib.BOWMSCTrainer.cluster() |
opencv_core.Mat |
opencv_features2d.BOWTrainer.cluster() |
opencv_core.Mat |
opencv_features2d.BOWKMeansTrainer.cluster() |
opencv_core.Mat |
opencv_contrib.BOWMSCTrainer.cluster(opencv_core.Mat descriptors) |
opencv_core.Mat |
opencv_features2d.BOWTrainer.cluster(opencv_core.Mat descriptors) |
opencv_core.Mat |
opencv_features2d.BOWKMeansTrainer.cluster(opencv_core.Mat descriptors) |
opencv_core.Mat |
opencv_core.Mat.col(int x)
returns a new matrix header for the specified column
|
opencv_core.Mat |
opencv_core.Mat.colRange(int startcol,
int endcol)
...
|
opencv_core.Mat |
opencv_core.Mat.colRange(opencv_core.Range r) |
opencv_core.Mat |
opencv_core.Mat.cols(int cols) |
opencv_core.Mat |
opencv_video.KalmanFilter.controlMatrix()
control matrix (B) (not used if there is no control)
|
opencv_core.Mat |
opencv_video.KalmanFilter.correct(opencv_core.Mat measurement)
updates the predicted state from the measurement
|
opencv_core.Mat |
opencv_core.Mat.cross(opencv_core.Mat m)
computes cross-product of 2 3D vectors
|
static opencv_core.Mat |
opencv_core.cvarrToMat(opencv_core.CvArr arr) |
static opencv_core.Mat |
opencv_core.cvarrToMat(opencv_core.CvArr arr,
boolean copyData,
boolean allowND,
int coiMode)
converts array (CvMat or IplImage) to cv::Mat
|
opencv_core.Mat |
opencv_core.Mat.data(BytePointer data) |
opencv_core.Mat |
opencv_core.Mat.dataend(BytePointer dataend) |
opencv_core.Mat |
opencv_core.Mat.datalimit(BytePointer datalimit) |
opencv_core.Mat |
opencv_core.Mat.datastart(BytePointer datastart) |
opencv_core.Mat |
opencv_stitching.ImageFeatures.descriptors() |
opencv_core.Mat |
opencv_core.Mat.diag() |
opencv_core.Mat |
opencv_core.Mat.diag(int d)
...
|
static opencv_core.Mat |
opencv_core.Mat.diag(opencv_core.Mat d)
constructs a square diagonal matrix whose main diagonal is vector "d"
|
opencv_core.Mat |
opencv_core.Mat.dims(int dims) |
opencv_core.Mat |
opencv_contrib.CvFeatureTracker.disp_matches() |
opencv_core.Mat |
opencv_videostab.FastMarchingMethod.distanceMap() |
opencv_core.Mat |
opencv_contrib.LDA.eigenvalues() |
opencv_core.Mat |
opencv_core.PCA.eigenvalues()
eigenvalues of the covariation matrix
|
opencv_core.Mat |
opencv_contrib.LDA.eigenvectors() |
opencv_core.Mat |
opencv_core.PCA.eigenvectors()
eigenvectors of the covariation matrix
|
static opencv_core.Mat |
opencv_videostab.ensureInclusionConstraint(opencv_core.Mat M,
opencv_core.Size size,
float trimRatio) |
opencv_core.Mat |
opencv_video.KalmanFilter.errorCovPost()
posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k)
|
opencv_core.Mat |
opencv_video.KalmanFilter.errorCovPre()
priori error estimate covariance matrix (P'(k)): P'(k)=A*P(k-1)*At + Q
|
opencv_core.Mat |
opencv_videostab.IGlobalMotionEstimator.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1) |
opencv_core.Mat |
opencv_videostab.PyrLkRobustMotionEstimator.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.Point2f points0,
opencv_core.Point2f points1) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.Point2f points0,
opencv_core.Point2f points1,
int model,
float[] rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.Point2f points0,
opencv_core.Point2f points1,
int model,
FloatBuffer rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.Point2f points0,
opencv_core.Point2f points1,
int model,
FloatPointer rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRobust(opencv_core.Point2f points0,
opencv_core.Point2f points1) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRobust(opencv_core.Point2f points0,
opencv_core.Point2f points1,
int model,
opencv_videostab.RansacParams params,
float[] rmse,
int[] ninliers) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRobust(opencv_core.Point2f points0,
opencv_core.Point2f points1,
int model,
opencv_videostab.RansacParams params,
FloatBuffer rmse,
IntBuffer ninliers) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRobust(opencv_core.Point2f points0,
opencv_core.Point2f points1,
int model,
opencv_videostab.RansacParams params,
FloatPointer rmse,
IntPointer ninliers) |
static opencv_core.Mat |
opencv_video.estimateRigidTransform(opencv_core.Mat src,
opencv_core.Mat dst,
boolean fullAffine)
estimates the best-fit Euclidean, similarity, affine or perspective transformation
// that maps one 2D point set to another or one image to another.
|
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2,
int method,
double param1,
double param2,
opencv_core.Mat mask)
finds fundamental matrix from a set of corresponding 2D points
|
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat mask) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat mask,
int method,
double param1,
double param2)
variant of findFundamentalMat for backward compatibility
|
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints,
int method,
double ransacReprojThreshold,
opencv_core.Mat mask)
computes the best-fit perspective transformation mapping srcPoints to dstPoints.
|
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints,
opencv_core.Mat mask) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints,
opencv_core.Mat mask,
int method,
double ransacReprojThreshold)
variant of findHomography for backward compatibility
|
opencv_core.Mat |
opencv_stitching.MatBytePairVector.first(long i) |
opencv_core.Mat |
opencv_core.Mat.flags(int flags) |
opencv_core.Mat |
opencv_video.KalmanFilter.gain()
Kalman gain matrix (K(k)): K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)
|
opencv_core.Mat |
opencv_objdetect.CascadeClassifier.MaskGenerator.generateMask(opencv_core.Mat src) |
opencv_core.Mat |
opencv_core.MatVector.get(long i) |
static opencv_core.Mat |
opencv_imgproc.getAffineTransform(opencv_core.Mat src,
opencv_core.Mat dst) |
static opencv_core.Mat |
opencv_imgproc.getAffineTransform(opencv_core.Point2f src,
opencv_core.Point2f dst)
returns 2x3 affine transformation for the corresponding 3 point pairs.
|
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.Mat cameraMatrix) |
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Size imgsize,
boolean centerPrincipalPoint)
returns the default new camera matrix (by default it is the same as cameraMatrix unless centerPrincipalPoint=true)
|
static opencv_core.Mat |
opencv_imgproc.getGaborKernel(opencv_core.Size ksize,
double sigma,
double theta,
double lambd,
double gamma) |
static opencv_core.Mat |
opencv_imgproc.getGaborKernel(opencv_core.Size ksize,
double sigma,
double theta,
double lambd,
double gamma,
double psi,
int ktype)
returns the Gabor kernel with the specified parameters
|
static opencv_core.Mat |
opencv_imgproc.getGaussianKernel(int ksize,
double sigma) |
static opencv_core.Mat |
opencv_imgproc.getGaussianKernel(int ksize,
double sigma,
int ktype)
returns the Gaussian kernel with the specified parameters
|
opencv_core.Mat |
opencv_contrib.CvMeanShiftTracker.getHistogramProjection(int type) |
opencv_core.Mat |
opencv_core.Algorithm.getMat(BytePointer name) |
opencv_core.Mat |
opencv_core.Algorithm.getMat(String name) |
opencv_core.Mat |
opencv_legacy.CvEM.getMeans() |
static opencv_core.Mat |
opencv_videostab.getMotion(int from,
int to,
opencv_core.Mat motions,
int size) |
static opencv_core.Mat |
opencv_videostab.getMotion(int from,
int to,
opencv_core.MatVector motions) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
double alpha) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect validPixROI,
boolean centerPrincipalPoint)
returns the optimal new camera matrix
|
static opencv_core.Mat |
opencv_imgproc.getPerspectiveTransform(opencv_core.Mat src,
opencv_core.Mat dst) |
static opencv_core.Mat |
opencv_imgproc.getPerspectiveTransform(opencv_core.Point2f src,
opencv_core.Point2f dst)
returns 3x3 perspective transformation for the corresponding 4 point pairs.
|
opencv_core.Mat |
opencv_legacy.CvEM.getProbs() |
static opencv_core.Mat |
opencv_imgproc.getRotationMatrix2D(opencv_core.Point2f center,
double angle,
double scale)
returns 2x3 affine transformation matrix for the planar rotation.
|
opencv_core.Mat |
opencv_contrib.SpinImageModel.getSpinImage(long index) |
static opencv_core.Mat |
opencv_imgproc.getStructuringElement(int shape,
opencv_core.Size ksize) |
static opencv_core.Mat |
opencv_imgproc.getStructuringElement(int shape,
opencv_core.Size ksize,
opencv_core.Point anchor)
returns structuring element of the specified shape and size
|
opencv_core.Mat |
opencv_ml.CvDTree.getVarImportance() |
opencv_core.Mat |
opencv_ml.CvRTrees.getVarImportance() |
opencv_core.Mat |
opencv_features2d.BOWImgDescriptorExtractor.getVocabulary() |
opencv_core.Mat |
opencv_legacy.CvEM.getWeights() |
opencv_core.Mat |
opencv_stitching.MatchesInfo.H() |
static opencv_core.Mat |
opencv_highgui.imdecode(opencv_core.Mat buf,
int flags) |
static opencv_core.Mat |
opencv_highgui.imdecode(opencv_core.Mat buf,
int flags,
opencv_core.Mat dst) |
static opencv_core.Mat |
opencv_highgui.imread(BytePointer filename) |
static opencv_core.Mat |
opencv_highgui.imread(BytePointer filename,
int flags) |
static opencv_core.Mat |
opencv_highgui.imread(String filename) |
static opencv_core.Mat |
opencv_highgui.imread(String filename,
int flags) |
static opencv_core.Mat |
opencv_calib3d.initCameraMatrix2D(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize) |
static opencv_core.Mat |
opencv_calib3d.initCameraMatrix2D(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
double aspectRatio)
initializes camera matrix from a few 3D points and the corresponding projections.
|
opencv_core.Mat |
opencv_stitching.CameraParams.K() |
opencv_core.Mat |
opencv_core.MatConstIterator.m() |
opencv_core.Mat |
opencv_contrib.ChowLiuTree.make() |
opencv_core.Mat |
opencv_contrib.ChowLiuTree.make(double infoThreshold) |
opencv_core.Mat |
opencv_stitching.Stitcher.matchingMask() |
opencv_core.Mat |
opencv_core.PCA.mean()
mean value subtracted before the projection and added after the back projection
|
opencv_core.Mat |
opencv_video.KalmanFilter.measurementMatrix()
measurement matrix (H)
|
opencv_core.Mat |
opencv_video.KalmanFilter.measurementNoiseCov()
measurement noise covariance matrix (R)
|
opencv_core.Mat |
opencv_videostab.IFrameSource.nextFrame() |
opencv_core.Mat |
opencv_videostab.NullFrameSource.nextFrame() |
opencv_core.Mat |
opencv_videostab.VideoFileSource.nextFrame() |
opencv_core.Mat |
opencv_videostab.OnePassStabilizer.nextFrame() |
opencv_core.Mat |
opencv_videostab.TwoPassStabilizer.nextFrame() |
opencv_core.Mat |
opencv_contrib.SpinImageModel.packRandomScaledSpins() |
opencv_core.Mat |
opencv_contrib.SpinImageModel.packRandomScaledSpins(boolean separateScale,
long xCount,
long yCount) |
opencv_core.Mat |
opencv_core.NAryMatIterator.planes()
the current planes
|
opencv_core.Mat |
opencv_core.KDTree.points()
all the points.
|
opencv_core.Mat |
opencv_core.Mat.position(int position) |
opencv_core.Mat |
opencv_video.KalmanFilter.predict() |
opencv_core.Mat |
opencv_video.KalmanFilter.predict(opencv_core.Mat control)
computes predicted state
|
opencv_core.Mat |
opencv_video.KalmanFilter.processNoiseCov()
process noise covariance matrix (Q)
|
opencv_core.Mat |
opencv_contrib.LDA.project(opencv_core.Mat src) |
opencv_core.Mat |
opencv_core.PCA.project(opencv_core.Mat vec)
projects vector from the original space to the principal components subspace
|
opencv_core.Mat |
opencv_core.Mat.put(opencv_core.Mat m)
assignment operators
|
opencv_core.Mat |
opencv_core.Mat.put(opencv_core.MatExpr expr) |
opencv_core.Mat |
opencv_core.Mat.put(opencv_core.Scalar s)
sets every matrix element to s
|
opencv_core.Mat |
opencv_stitching.CameraParams.R() |
opencv_core.Mat |
opencv_contrib.LDA.reconstruct(opencv_core.Mat src) |
opencv_core.Mat |
opencv_core.Mat.refcount(IntPointer refcount) |
opencv_core.Mat |
opencv_stitching.BundleAdjusterBase.refinementMask() |
static opencv_core.Mat |
opencv_core.repeat(opencv_core.Mat src,
int ny,
int nx) |
opencv_core.Mat |
opencv_core.Mat.reshape(int cn) |
opencv_core.Mat |
opencv_core.Mat.reshape(int cn,
int rows)
creates alternative matrix header for the same data, with different
// number of channels and/or different number of rows.
|
opencv_core.Mat |
opencv_core.Mat.reshape(int cn,
int newndims,
int[] newsz) |
opencv_core.Mat |
opencv_core.Mat.reshape(int cn,
int newndims,
IntBuffer newsz) |
opencv_core.Mat |
opencv_core.Mat.reshape(int cn,
int newndims,
IntPointer newsz) |
opencv_core.Mat |
opencv_core.Mat.row(int y)
returns a new matrix header for the specified row
|
opencv_core.Mat |
opencv_core.Mat.rowRange(int startrow,
int endrow)
...
|
opencv_core.Mat |
opencv_core.Mat.rowRange(opencv_core.Range r) |
opencv_core.Mat |
opencv_core.Mat.rows(int rows) |
opencv_core.Mat |
opencv_core.Mat.setTo(opencv_core.Mat value) |
opencv_core.Mat |
opencv_core.Mat.setTo(opencv_core.Mat value,
opencv_core.Mat mask)
sets some of the matrix elements to s, according to the mask
|
opencv_core.Mat |
opencv_videostab.MotionFilterBase.stabilize(int index,
opencv_core.Mat motions,
int size) |
opencv_core.Mat |
opencv_videostab.GaussianMotionFilter.stabilize(int index,
opencv_core.Mat motions,
int size) |
opencv_core.Mat |
opencv_video.KalmanFilter.statePost()
corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k))
|
opencv_core.Mat |
opencv_video.KalmanFilter.statePre()
predicted state (x'(k)): x'(k)=A*x(k-1)+B*u(k)
|
static opencv_core.Mat |
opencv_contrib.subspaceProject(opencv_core.Mat W,
opencv_core.Mat mean,
opencv_core.Mat src) |
static opencv_core.Mat |
opencv_contrib.subspaceReconstruct(opencv_core.Mat W,
opencv_core.Mat mean,
opencv_core.Mat src) |
opencv_core.Mat |
opencv_stitching.CameraParams.t() |
opencv_core.Mat |
opencv_video.KalmanFilter.temp1() |
opencv_core.Mat |
opencv_video.KalmanFilter.temp2() |
opencv_core.Mat |
opencv_video.KalmanFilter.temp3() |
opencv_core.Mat |
opencv_video.KalmanFilter.temp4() |
opencv_core.Mat |
opencv_video.KalmanFilter.temp5() |
opencv_core.Mat |
opencv_contrib.LogPolar_Interp.to_cartesian(opencv_core.Mat source)
Transformation from cortical image to retinal (inverse log-polar) image.
|
opencv_core.Mat |
opencv_contrib.LogPolar_Overlapping.to_cartesian(opencv_core.Mat source)
Transformation from cortical image to retinal (inverse log-polar) image.
|
opencv_core.Mat |
opencv_contrib.LogPolar_Adjacent.to_cartesian(opencv_core.Mat source)
Transformation from cortical image to retinal (inverse log-polar) image.
|
opencv_core.Mat |
opencv_contrib.LogPolar_Interp.to_cortical(opencv_core.Mat source)
Transformation from Cartesian image to cortical (log-polar) image.
|
opencv_core.Mat |
opencv_contrib.LogPolar_Overlapping.to_cortical(opencv_core.Mat source)
Transformation from Cartesian image to cortical (log-polar) image.
|
opencv_core.Mat |
opencv_contrib.LogPolar_Adjacent.to_cortical(opencv_core.Mat source)
Transformation from Cartesian image to cortical (log-polar) image.
|
opencv_core.Mat |
opencv_video.KalmanFilter.transitionMatrix()
state transition matrix (A)
|
opencv_core.Mat |
opencv_core.SVD.u() |
opencv_core.Mat |
opencv_core.SVD.vt() |
opencv_core.Mat |
opencv_core.SVD.w() |
static opencv_core.Mat |
opencv_features2d.windowedMatchingMask(opencv_features2d.KeyPoint keypoints1,
opencv_features2d.KeyPoint keypoints2,
float maxDeltaX,
float maxDeltaY) |
| Modifier and Type | Method and Description |
|---|---|
static void |
opencv_core.absdiff(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst)
computes element-wise absolute difference of two arrays (dst = abs(src1 - src2))
|
static void |
opencv_imgproc.accumulate(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_imgproc.accumulate(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat mask)
adds image to the accumulator (dst += src).
|
static void |
opencv_imgproc.accumulateProduct(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_imgproc.accumulateProduct(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask)
adds product of the 2 images to the accumulator (dst += src1*src2).
|
static void |
opencv_imgproc.accumulateSquare(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_imgproc.accumulateSquare(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat mask)
adds squared src image to the accumulator (dst += src*src).
|
static void |
opencv_imgproc.accumulateWeighted(opencv_core.Mat src,
opencv_core.Mat dst,
double alpha) |
static void |
opencv_imgproc.accumulateWeighted(opencv_core.Mat src,
opencv_core.Mat dst,
double alpha,
opencv_core.Mat mask)
updates the running average (dst = dst*(1-alpha) + src*alpha)
|
static void |
opencv_imgproc.adaptiveBilateralFilter(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize,
double sigmaSpace) |
static void |
opencv_imgproc.adaptiveBilateralFilter(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize,
double sigmaSpace,
double maxSigmaColor,
opencv_core.Point anchor,
int borderType)
smooths the image using adaptive bilateral filter
|
static void |
opencv_imgproc.adaptiveThreshold(opencv_core.Mat src,
opencv_core.Mat dst,
double maxValue,
int adaptiveMethod,
int thresholdType,
int blockSize,
double C)
applies variable (adaptive) threshold to the image
|
void |
opencv_contrib.FabMap.add(opencv_core.Mat queryImgDescriptor) |
void |
opencv_contrib.FabMap2.add(opencv_core.Mat queryImgDescriptors) |
void |
opencv_contrib.ChowLiuTree.add(opencv_core.Mat imgDescriptor) |
void |
opencv_features2d.BOWTrainer.add(opencv_core.Mat descriptors) |
static void |
opencv_core.add(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.add(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask,
int dtype)
adds one matrix to another (dst = src1 + src2)
|
int |
opencv_objdetect.Detector.addTemplate(opencv_core.MatVector sources,
BytePointer class_id,
opencv_core.Mat object_mask) |
int |
opencv_objdetect.Detector.addTemplate(opencv_core.MatVector sources,
BytePointer class_id,
opencv_core.Mat object_mask,
opencv_core.Rect bounding_box)
\brief Add new object template.
|
int |
opencv_objdetect.Detector.addTemplate(opencv_core.MatVector sources,
String class_id,
opencv_core.Mat object_mask) |
int |
opencv_objdetect.Detector.addTemplate(opencv_core.MatVector sources,
String class_id,
opencv_core.Mat object_mask,
opencv_core.Rect bounding_box) |
static void |
opencv_highgui.addText(opencv_core.Mat img,
BytePointer text,
opencv_core.Point org,
opencv_core.CvFont font) |
static void |
opencv_highgui.addText(opencv_core.Mat img,
String text,
opencv_core.Point org,
opencv_core.CvFont font) |
void |
opencv_contrib.FabMap.addTraining(opencv_core.Mat queryImgDescriptor) |
void |
opencv_contrib.FabMap2.addTraining(opencv_core.Mat queryImgDescriptors) |
static void |
opencv_core.addWeighted(opencv_core.Mat src1,
double alpha,
opencv_core.Mat src2,
double beta,
double gamma,
opencv_core.Mat dst) |
static void |
opencv_core.addWeighted(opencv_core.Mat src1,
double alpha,
opencv_core.Mat src2,
double beta,
double gamma,
opencv_core.Mat dst,
int dtype)
computes weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma)
|
void |
opencv_stitching.ExposureCompensator.apply(int index,
opencv_core.Point corner,
opencv_core.Mat image,
opencv_core.Mat mask) |
void |
opencv_stitching.NoExposureCompensator.apply(int arg0,
opencv_core.Point arg1,
opencv_core.Mat arg2,
opencv_core.Mat arg3) |
void |
opencv_stitching.GainCompensator.apply(int index,
opencv_core.Point corner,
opencv_core.Mat image,
opencv_core.Mat mask) |
void |
opencv_stitching.BlocksGainCompensator.apply(int index,
opencv_core.Point corner,
opencv_core.Mat image,
opencv_core.Mat mask) |
opencv_core.SVD |
opencv_core.SVD.apply(opencv_core.Mat src) |
int |
opencv_legacy.FernClassifier.apply(opencv_core.Mat patch,
float[] signature) |
int |
opencv_legacy.FernClassifier.apply(opencv_core.Mat patch,
FloatBuffer signature) |
int |
opencv_legacy.FernClassifier.apply(opencv_core.Mat patch,
FloatPointer signature) |
opencv_core.SVD |
opencv_core.SVD.apply(opencv_core.Mat src,
int flags)
the operator that performs SVD.
|
void |
opencv_video.BackgroundSubtractor.apply(opencv_core.Mat image,
opencv_core.Mat fgmask) |
void |
opencv_video.BackgroundSubtractorMOG.apply(opencv_core.Mat image,
opencv_core.Mat fgmask) |
void |
opencv_video.BackgroundSubtractorMOG2.apply(opencv_core.Mat image,
opencv_core.Mat fgmask) |
void |
opencv_video.BackgroundSubtractorGMG.apply(opencv_core.Mat image,
opencv_core.Mat fgmask) |
void |
opencv_imgproc.FilterEngine.apply(opencv_core.Mat src,
opencv_core.Mat dst) |
void |
opencv_imgproc.CLAHE.apply(opencv_core.Mat src,
opencv_core.Mat dst) |
void |
opencv_video.BackgroundSubtractor.apply(opencv_core.Mat image,
opencv_core.Mat fgmask,
double learningRate)
the update operator that takes the next video frame and returns the current foreground mask as 8-bit binary image.
|
void |
opencv_video.BackgroundSubtractorMOG.apply(opencv_core.Mat image,
opencv_core.Mat fgmask,
double learningRate)
the update operator
|
void |
opencv_video.BackgroundSubtractorMOG2.apply(opencv_core.Mat image,
opencv_core.Mat fgmask,
double learningRate)
the update operator
|
void |
opencv_video.BackgroundSubtractorGMG.apply(opencv_core.Mat image,
opencv_core.Mat fgmask,
double learningRate)
Performs single-frame background subtraction and builds up a statistical background image
model.
|
opencv_core.PCA |
opencv_core.PCA.apply(opencv_core.Mat data,
opencv_core.Mat mean,
int flags) |
opencv_core.PCA |
opencv_core.PCA.apply(opencv_core.Mat data,
opencv_core.Mat mean,
int flags,
int maxComponents)
operator that performs PCA.
|
void |
opencv_legacy.PatchGenerator.apply(opencv_core.Mat image,
opencv_core.Mat transform,
opencv_core.Mat patch,
opencv_core.Size patchSize,
opencv_core.RNG rng) |
boolean |
opencv_legacy.PlanarObjectDetector.apply(opencv_core.Mat image,
opencv_core.Mat H,
opencv_core.Point2f corners) |
void |
opencv_imgproc.FilterEngine.apply(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Rect srcRoi,
opencv_core.Point dstOfs,
boolean isolated)
applies filter to the specified ROI of the image.
|
void |
opencv_nonfree.SIFT.apply(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_features2d.KeyPoint keypoints)
finds the keypoints using SIFT algorithm
|
void |
opencv_nonfree.SURF.apply(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_features2d.KeyPoint keypoints)
finds the keypoints using fast hessian detector used in SURF
|
void |
opencv_features2d.BRISK.apply(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_features2d.KeyPoint keypoints) |
void |
opencv_features2d.ORB.apply(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_features2d.KeyPoint keypoints) |
void |
opencv_nonfree.SIFT.apply(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat descriptors) |
void |
opencv_nonfree.SURF.apply(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat descriptors) |
void |
opencv_features2d.BRISK.apply(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat descriptors) |
void |
opencv_features2d.ORB.apply(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat descriptors) |
void |
opencv_nonfree.SIFT.apply(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat descriptors,
boolean useProvidedKeypoints)
finds the keypoints and computes descriptors for them using SIFT algorithm.
|
void |
opencv_nonfree.SURF.apply(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat descriptors,
boolean useProvidedKeypoints)
finds the keypoints and computes their descriptors.
|
void |
opencv_features2d.BRISK.apply(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat descriptors,
boolean useProvidedKeypoints) |
void |
opencv_features2d.ORB.apply(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat descriptors,
boolean useProvidedKeypoints) |
int |
opencv_legacy.FernClassifier.apply(opencv_core.Mat img,
opencv_core.Point2f kpt,
float[] signature) |
int |
opencv_legacy.FernClassifier.apply(opencv_core.Mat img,
opencv_core.Point2f kpt,
FloatBuffer signature) |
int |
opencv_legacy.FernClassifier.apply(opencv_core.Mat img,
opencv_core.Point2f kpt,
FloatPointer signature) |
void |
opencv_legacy.PatchGenerator.apply(opencv_core.Mat image,
opencv_core.Point2f pt,
opencv_core.Mat patch,
opencv_core.Size patchSize,
opencv_core.RNG rng) |
void |
opencv_legacy.LDetector.apply(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints) |
void |
opencv_legacy.LDetector.apply(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints,
int maxCount,
boolean scaleCoords) |
void |
opencv_stitching.FeaturesFinder.apply(opencv_core.Mat image,
opencv_stitching.ImageFeatures features) |
void |
opencv_stitching.FeaturesFinder.apply(opencv_core.Mat image,
opencv_stitching.ImageFeatures features,
opencv_core.Rect rois) |
boolean |
opencv_legacy.PlanarObjectDetector.apply(opencv_core.MatVector pyr,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat H,
opencv_core.Point2f corners) |
boolean |
opencv_legacy.PlanarObjectDetector.apply(opencv_core.MatVector pyr,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat H,
opencv_core.Point2f corners,
int[] pairs) |
boolean |
opencv_legacy.PlanarObjectDetector.apply(opencv_core.MatVector pyr,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat H,
opencv_core.Point2f corners,
IntBuffer pairs) |
boolean |
opencv_legacy.PlanarObjectDetector.apply(opencv_core.MatVector pyr,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat H,
opencv_core.Point2f corners,
IntPointer pairs) |
void |
opencv_stitching.FeaturesMatcher.apply(opencv_stitching.ImageFeatures features,
opencv_stitching.MatchesInfo pairwise_matches,
opencv_core.Mat mask) |
static void |
opencv_contrib.applyColorMap(opencv_core.Mat src,
opencv_core.Mat dst,
int colormap) |
static void |
opencv_imgproc.approxPolyDP(opencv_core.Mat curve,
opencv_core.Mat approxCurve,
double epsilon,
boolean closed)
approximates contour or a curve using Douglas-Peucker algorithm
|
static double |
opencv_imgproc.arcLength(opencv_core.Mat curve,
boolean closed)
computes the contour perimeter (closed=true) or a curve length
|
void |
opencv_core.Mat.assignTo(opencv_core.Mat m) |
void |
opencv_core.Mat.assignTo(opencv_core.Mat m,
int type) |
opencv_core.Mat |
opencv_core.PCA.backProject(opencv_core.Mat vec)
reconstructs the original vector from the projection
|
void |
opencv_core.PCA.backProject(opencv_core.Mat vec,
opencv_core.Mat result)
reconstructs the original vector from the projection
|
void |
opencv_core.SVD.backSubst(opencv_core.Mat rhs,
opencv_core.Mat dst)
performs back substitution, so that dst is the solution or pseudo-solution of m*dst = rhs, where m is the decomposed matrix
|
static void |
opencv_core.SVD.backSubst(opencv_core.Mat w,
opencv_core.Mat u,
opencv_core.Mat vt,
opencv_core.Mat rhs,
opencv_core.Mat dst)
performs back substitution
|
static void |
opencv_core.batchDistance(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dist,
int dtype,
opencv_core.Mat nidx) |
static void |
opencv_core.batchDistance(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dist,
int dtype,
opencv_core.Mat nidx,
int normType,
int K,
opencv_core.Mat mask,
int update,
boolean crosscheck)
naive nearest neighbor finder
|
static void |
opencv_imgproc.bilateralFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int d,
double sigmaColor,
double sigmaSpace) |
static void |
opencv_imgproc.bilateralFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int d,
double sigmaColor,
double sigmaSpace,
int borderType)
smooths the image using bilateral filter
|
static void |
opencv_core.bitwise_and(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.bitwise_and(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask)
computes bitwise conjunction of the two arrays (dst = src1 & src2)
|
static void |
opencv_core.bitwise_not(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.bitwise_not(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat mask)
inverts each bit of array (dst = ~src)
|
static void |
opencv_core.bitwise_or(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.bitwise_or(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask)
computes bitwise disjunction of the two arrays (dst = src1 | src2)
|
static void |
opencv_core.bitwise_xor(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.bitwise_xor(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask)
computes bitwise exclusive-or of the two arrays (dst = src1 ^ src2)
|
void |
opencv_stitching.Blender.blend(opencv_core.Mat dst,
opencv_core.Mat dst_mask) |
void |
opencv_stitching.FeatherBlender.blend(opencv_core.Mat dst,
opencv_core.Mat dst_mask) |
void |
opencv_stitching.MultiBandBlender.blend(opencv_core.Mat dst,
opencv_core.Mat dst_mask) |
static void |
opencv_imgproc.blur(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize) |
static void |
opencv_imgproc.blur(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize,
opencv_core.Point anchor,
int borderType)
a synonym for normalized box filter
|
static opencv_core.Rect |
opencv_imgproc.boundingRect(opencv_core.Mat points)
computes the bounding rectangle for a contour
|
static void |
opencv_imgproc.boxFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Size ksize) |
static void |
opencv_imgproc.boxFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Size ksize,
opencv_core.Point anchor,
boolean normalize,
int borderType)
smooths the image using the box filter.
|
void |
opencv_core.KDTree.build(opencv_core.Mat points) |
void |
opencv_core.KDTree.build(opencv_core.Mat points,
boolean copyAndReorderPoints)
builds the search tree
|
void |
opencv_core.KDTree.build(opencv_core.Mat points,
opencv_core.Mat labels) |
void |
opencv_core.KDTree.build(opencv_core.Mat points,
opencv_core.Mat labels,
boolean copyAndReorderPoints)
builds the search tree
|
void |
opencv_flann.Index.build(opencv_core.Mat features,
opencv_flann.IndexParams params) |
void |
opencv_flann.Index.build(opencv_core.Mat features,
opencv_flann.IndexParams params,
int distType) |
void |
opencv_nonfree.SIFT.buildGaussianPyramid(opencv_core.Mat base,
opencv_core.MatVector pyr,
int nOctaves) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.GpuMat xmap,
opencv_core.GpuMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailSphericalWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.GpuMat xmap,
opencv_core.GpuMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailCylindricalWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.GpuMat xmap,
opencv_core.GpuMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.GpuMat xmap,
opencv_core.GpuMat ymap) |
opencv_core.Rect |
opencv_stitching.RotationWarper.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailSphericalWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailCylindricalWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.Mat img,
opencv_core.MatVector pyramid,
opencv_core.Size winSize,
int maxLevel) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.Mat img,
opencv_core.MatVector pyramid,
opencv_core.Size winSize,
int maxLevel,
boolean withDerivatives,
int pyrBorder,
int derivBorder,
boolean tryReuseInputImage)
constructs a pyramid which can be used as input for calcOpticalFlowPyrLK
|
static void |
opencv_imgproc.buildPyramid(opencv_core.Mat src,
opencv_core.MatVector dst,
int maxlevel) |
static void |
opencv_imgproc.buildPyramid(opencv_core.Mat src,
opencv_core.MatVector dst,
int maxlevel,
int borderType)
builds the gaussian pyramid using pyrDown() as a basic operation
|
void |
opencv_video.DenseOpticalFlow.calc(opencv_core.Mat I0,
opencv_core.Mat I1,
opencv_core.Mat flow) |
void |
opencv_superres.DenseOpticalFlowExt.calc(opencv_core.Mat frame0,
opencv_core.Mat frame1,
opencv_core.Mat flow1) |
void |
opencv_superres.DenseOpticalFlowExt.calc(opencv_core.Mat frame0,
opencv_core.Mat frame1,
opencv_core.Mat flow1,
opencv_core.Mat flow2) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.Mat hist,
opencv_core.Mat backProject,
float[] ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.Mat hist,
opencv_core.Mat backProject,
float[] ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.SparseMat hist,
opencv_core.Mat backProject,
float[] ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.SparseMat hist,
opencv_core.Mat backProject,
float[] ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.Mat hist,
opencv_core.Mat backProject,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.Mat hist,
opencv_core.Mat backProject,
FloatBuffer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.SparseMat hist,
opencv_core.Mat backProject,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.SparseMat hist,
opencv_core.Mat backProject,
FloatBuffer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.Mat hist,
opencv_core.Mat backProject,
FloatPointer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.Mat hist,
opencv_core.Mat backProject,
FloatPointer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.Mat hist,
opencv_core.Mat backProject,
PointerPointer ranges,
double scale,
boolean uniform)
computes back projection for the set of images
|
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.SparseMat hist,
opencv_core.Mat backProject,
FloatPointer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.SparseMat hist,
opencv_core.Mat backProject,
FloatPointer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.SparseMat hist,
opencv_core.Mat backProject,
PointerPointer ranges,
double scale,
boolean uniform)
computes back projection for the set of images
|
static void |
opencv_imgproc.calcBackProject(opencv_core.MatVector images,
int[] channels,
opencv_core.Mat hist,
opencv_core.Mat dst,
float[] ranges,
double scale) |
static void |
opencv_imgproc.calcBackProject(opencv_core.MatVector images,
IntBuffer channels,
opencv_core.Mat hist,
opencv_core.Mat dst,
FloatBuffer ranges,
double scale) |
static void |
opencv_imgproc.calcBackProject(opencv_core.MatVector images,
IntPointer channels,
opencv_core.Mat hist,
opencv_core.Mat dst,
FloatPointer ranges,
double scale) |
static float |
opencv_videostab.calcBlurriness(opencv_core.Mat frame) |
static void |
opencv_core.calcCovarMatrix(opencv_core.Mat samples,
int nsamples,
opencv_core.Mat covar,
opencv_core.Mat mean,
int flags) |
static void |
opencv_core.calcCovarMatrix(opencv_core.Mat samples,
int nsamples,
opencv_core.Mat covar,
opencv_core.Mat mean,
int flags,
int ctype)
computes covariation matrix of a set of samples
|
static void |
opencv_core.calcCovarMatrix(opencv_core.Mat samples,
opencv_core.Mat covar,
opencv_core.Mat mean,
int flags) |
static void |
opencv_core.calcCovarMatrix(opencv_core.Mat samples,
opencv_core.Mat covar,
opencv_core.Mat mean,
int flags,
int ctype)
computes covariation matrix of a set of samples
|
static void |
opencv_videostab.calcFlowMask(opencv_core.Mat flowX,
opencv_core.Mat flowY,
opencv_core.Mat errors,
float maxError,
opencv_core.Mat mask0,
opencv_core.Mat mask1,
opencv_core.Mat flowMask) |
static double |
opencv_video.calcGlobalOrientation(opencv_core.Mat orientation,
opencv_core.Mat mask,
opencv_core.Mat mhi,
double timestamp,
double duration)
computes the global orientation of the selected motion history image part
|
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int dims,
int[] histSize,
float[] ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int dims,
int[] histSize,
float[] ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.Mat mask,
opencv_core.SparseMat hist,
int dims,
int[] histSize,
float[] ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.Mat mask,
opencv_core.SparseMat hist,
int dims,
int[] histSize,
float[] ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.Mat mask,
opencv_core.SparseMat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.Mat mask,
opencv_core.SparseMat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int dims,
IntPointer histSize,
FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int dims,
IntPointer histSize,
FloatPointer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int dims,
IntPointer histSize,
PointerPointer ranges,
boolean uniform,
boolean accumulate)
computes the joint dense histogram for a set of images.
|
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.Mat mask,
opencv_core.SparseMat hist,
int dims,
IntPointer histSize,
FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.Mat mask,
opencv_core.SparseMat hist,
int dims,
IntPointer histSize,
FloatPointer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntPointer channels,
opencv_core.Mat mask,
opencv_core.SparseMat hist,
int dims,
IntPointer histSize,
PointerPointer ranges,
boolean uniform,
boolean accumulate)
computes the joint sparse histogram for a set of images.
|
static void |
opencv_imgproc.calcHist(opencv_core.MatVector images,
int[] channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int[] histSize,
float[] ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.MatVector images,
int[] channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int[] histSize,
float[] ranges,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.MatVector images,
IntBuffer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
IntBuffer histSize,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.MatVector images,
IntBuffer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
IntBuffer histSize,
FloatBuffer ranges,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.MatVector images,
IntPointer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
IntPointer histSize,
FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.MatVector images,
IntPointer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
IntPointer histSize,
FloatPointer ranges,
boolean accumulate) |
double |
opencv_legacy.CvEM.calcLikelihood(opencv_core.Mat sample) |
static void |
opencv_video.calcMotionGradient(opencv_core.Mat mhi,
opencv_core.Mat mask,
opencv_core.Mat orientation,
double delta1,
double delta2) |
static void |
opencv_video.calcMotionGradient(opencv_core.Mat mhi,
opencv_core.Mat mask,
opencv_core.Mat orientation,
double delta1,
double delta2,
int apertureSize)
computes the motion gradient orientation image from the motion history image
|
static void |
opencv_video.calcOpticalFlowFarneback(opencv_core.Mat prev,
opencv_core.Mat next,
opencv_core.Mat flow,
double pyr_scale,
int levels,
int winsize,
int iterations,
int poly_n,
double poly_sigma,
int flags)
computes dense optical flow using Farneback algorithm
|
static void |
opencv_video.calcOpticalFlowPyrLK(opencv_core.Mat prevImg,
opencv_core.Mat nextImg,
opencv_core.Mat prevPts,
opencv_core.Mat nextPts,
opencv_core.Mat status,
opencv_core.Mat err) |
static void |
opencv_video.calcOpticalFlowPyrLK(opencv_core.Mat prevImg,
opencv_core.Mat nextImg,
opencv_core.Mat prevPts,
opencv_core.Mat nextPts,
opencv_core.Mat status,
opencv_core.Mat err,
opencv_core.Size winSize,
int maxLevel,
opencv_core.TermCriteria criteria,
int flags,
double minEigThreshold)
computes sparse optical flow using multi-scale Lucas-Kanade algorithm
|
static void |
opencv_video.calcOpticalFlowSF(opencv_core.Mat from,
opencv_core.Mat to,
opencv_core.Mat flow,
int layers,
int averaging_block_size,
int max_flow)
computes dense optical flow using Simple Flow algorithm
|
static void |
opencv_video.calcOpticalFlowSF(opencv_core.Mat from,
opencv_core.Mat to,
opencv_core.Mat flow,
int layers,
int averaging_block_size,
int max_flow,
double sigma_dist,
double sigma_color,
int postprocess_window,
double sigma_dist_fix,
double sigma_color_fix,
double occ_thr,
int upscale_averaging_radius,
double upscale_sigma_dist,
double upscale_sigma_color,
double speed_up_thr) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
int flags,
opencv_core.TermCriteria criteria)
finds intrinsic and extrinsic camera parameters from several views of a known calibration pattern.
|
static boolean |
opencv_stitching.calibrateRotatingCamera(opencv_core.MatVector Hs,
opencv_core.Mat K) |
static void |
opencv_calib3d.calibrationMatrixValues(opencv_core.Mat cameraMatrix,
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
double[] fovx,
double[] fovy,
double[] focalLength,
opencv_core.Point2d principalPoint,
double[] aspectRatio) |
static void |
opencv_calib3d.calibrationMatrixValues(opencv_core.Mat cameraMatrix,
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
DoubleBuffer fovx,
DoubleBuffer fovy,
DoubleBuffer focalLength,
opencv_core.Point2d principalPoint,
DoubleBuffer aspectRatio) |
static void |
opencv_calib3d.calibrationMatrixValues(opencv_core.Mat cameraMatrix,
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
DoublePointer fovx,
DoublePointer fovy,
DoublePointer focalLength,
opencv_core.Point2d principalPoint,
DoublePointer aspectRatio)
computes several useful camera characteristics from the camera matrix, camera frame resolution and the physical sensor size.
|
void |
opencv_contrib.LevMarqSparse.Fjac_int_int_Mat_Mat_Mat_Mat_Pointer.call(int i,
int j,
opencv_core.Mat point_params,
opencv_core.Mat cam_params,
opencv_core.Mat A,
opencv_core.Mat B,
Pointer data) |
void |
opencv_contrib.LevMarqSparse.Func_int_int_Mat_Mat_Mat_Pointer.call(int i,
int j,
opencv_core.Mat point_params,
opencv_core.Mat cam_params,
opencv_core.Mat estim,
Pointer data) |
static opencv_core.RotatedRect |
opencv_video.CamShift(opencv_core.Mat probImage,
opencv_core.Rect window,
opencv_core.TermCriteria criteria)
updates the object tracking window using CAMSHIFT algorithm
|
static void |
opencv_imgproc.Canny(opencv_core.Mat image,
opencv_core.Mat edges,
double threshold1,
double threshold2) |
static void |
opencv_imgproc.Canny(opencv_core.Mat image,
opencv_core.Mat edges,
double threshold1,
double threshold2,
int apertureSize,
boolean L2gradient)
applies Canny edge detector and produces the edge map.
|
static void |
opencv_core.cartToPolar(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat magnitude,
opencv_core.Mat angle) |
static void |
opencv_core.cartToPolar(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat magnitude,
opencv_core.Mat angle,
boolean angleInDegrees)
converts Cartesian coordinates to polar
|
static int |
opencv_contrib.chamerMatching(opencv_core.Mat img,
opencv_core.Mat templ,
opencv_core.PointVectorVector results,
float[] cost) |
static int |
opencv_contrib.chamerMatching(opencv_core.Mat img,
opencv_core.Mat templ,
opencv_core.PointVectorVector results,
float[] cost,
double templScale,
int maxMatches,
double minMatchDistance,
int padX,
int padY,
int scales,
double minScale,
double maxScale,
double orientationWeight,
double truncate) |
static int |
opencv_contrib.chamerMatching(opencv_core.Mat img,
opencv_core.Mat templ,
opencv_core.PointVectorVector results,
FloatBuffer cost) |
static int |
opencv_contrib.chamerMatching(opencv_core.Mat img,
opencv_core.Mat templ,
opencv_core.PointVectorVector results,
FloatBuffer cost,
double templScale,
int maxMatches,
double minMatchDistance,
int padX,
int padY,
int scales,
double minScale,
double maxScale,
double orientationWeight,
double truncate) |
static int |
opencv_contrib.chamerMatching(opencv_core.Mat img,
opencv_core.Mat templ,
opencv_core.PointVectorVector results,
FloatPointer cost) |
static int |
opencv_contrib.chamerMatching(opencv_core.Mat img,
opencv_core.Mat templ,
opencv_core.PointVectorVector results,
FloatPointer cost,
double templScale,
int maxMatches,
double minMatchDistance,
int padX,
int padY,
int scales,
double minScale,
double maxScale,
double orientationWeight,
double truncate) |
static boolean |
opencv_core.checkRange(opencv_core.Mat a) |
static boolean |
opencv_core.checkRange(opencv_core.Mat a,
boolean quiet,
opencv_core.Point pos,
double minVal,
double maxVal)
checks that each matrix element is within the specified range.
|
static void |
opencv_core.circle(opencv_core.Mat img,
opencv_core.Point center,
int radius,
opencv_core.Scalar color) |
static void |
opencv_core.circle(opencv_core.Mat img,
opencv_core.Point center,
int radius,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
draws the circle outline or a solid circle in the image
|
void |
opencv_features2d.GenericDescriptorMatcher.classify(opencv_core.Mat queryImage,
opencv_features2d.KeyPoint queryKeypoints) |
void |
opencv_features2d.GenericDescriptorMatcher.classify(opencv_core.Mat queryImage,
opencv_features2d.KeyPoint queryKeypoints,
opencv_core.Mat trainImage,
opencv_features2d.KeyPoint trainKeypoints) |
opencv_core.Mat |
opencv_contrib.BOWMSCTrainer.cluster(opencv_core.Mat descriptors) |
opencv_core.Mat |
opencv_features2d.BOWTrainer.cluster(opencv_core.Mat descriptors) |
opencv_core.Mat |
opencv_features2d.BOWKMeansTrainer.cluster(opencv_core.Mat descriptors) |
void |
opencv_contrib.FabMap.compare(opencv_core.Mat queryImgDescriptor,
opencv_contrib.IMatch matches) |
void |
opencv_contrib.FabMap.compare(opencv_core.Mat queryImgDescriptor,
opencv_contrib.IMatch matches,
boolean addQuery,
opencv_core.Mat mask) |
void |
opencv_contrib.FabMap.compare(opencv_core.Mat queryImgDescriptor,
opencv_core.Mat testImgDescriptors,
opencv_contrib.IMatch matches) |
void |
opencv_contrib.FabMap.compare(opencv_core.Mat queryImgDescriptor,
opencv_core.Mat testImgDescriptors,
opencv_contrib.IMatch matches,
opencv_core.Mat mask) |
static void |
opencv_core.compare(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
int cmpop)
compares elements of two arrays (dst = src1 &lt;cmpop&gt; src2)
|
void |
opencv_contrib.FabMap.compare(opencv_core.Mat queryImgDescriptor,
opencv_core.MatVector testImgDescriptors,
opencv_contrib.IMatch matches) |
void |
opencv_contrib.FabMap.compare(opencv_core.Mat queryImgDescriptor,
opencv_core.MatVector testImgDescriptors,
opencv_contrib.IMatch matches,
opencv_core.Mat mask) |
void |
opencv_contrib.FabMap.compare(opencv_core.MatVector queryImgDescriptors,
opencv_contrib.IMatch matches,
boolean addQuery,
opencv_core.Mat mask) |
void |
opencv_contrib.FabMap.compare(opencv_core.MatVector queryImgDescriptors,
opencv_core.MatVector testImgDescriptors,
opencv_contrib.IMatch matches,
opencv_core.Mat mask) |
static double |
opencv_imgproc.compareHist(opencv_core.Mat H1,
opencv_core.Mat H2,
int method)
compares two histograms stored in dense arrays
|
static void |
opencv_videostab.completeFrameAccordingToFlow(opencv_core.Mat flowMask,
opencv_core.Mat flowX,
opencv_core.Mat flowY,
opencv_core.Mat frame1,
opencv_core.Mat mask1,
float distThresh,
opencv_core.Mat frame0,
opencv_core.Mat mask0) |
static void |
opencv_core.completeSymm(opencv_core.Mat mtx) |
static void |
opencv_core.completeSymm(opencv_core.Mat mtx,
boolean lowerToUpper)
extends the symmetrical matrix from the lower half or from the upper half
|
int |
opencv_stitching.Stitcher.composePanorama(opencv_core.Mat pano) |
int |
opencv_stitching.Stitcher.composePanorama(opencv_core.Mat images,
opencv_core.Mat pano) |
static void |
opencv_calib3d.composeRT(opencv_core.Mat rvec1,
opencv_core.Mat tvec1,
opencv_core.Mat rvec2,
opencv_core.Mat tvec2,
opencv_core.Mat rvec3,
opencv_core.Mat tvec3) |
static void |
opencv_calib3d.composeRT(opencv_core.Mat rvec1,
opencv_core.Mat tvec1,
opencv_core.Mat rvec2,
opencv_core.Mat tvec2,
opencv_core.Mat rvec3,
opencv_core.Mat tvec3,
opencv_core.Mat dr3dr1,
opencv_core.Mat dr3dt1,
opencv_core.Mat dr3dr2,
opencv_core.Mat dr3dt2,
opencv_core.Mat dt3dr1,
opencv_core.Mat dt3dt1,
opencv_core.Mat dt3dr2,
opencv_core.Mat dt3dt2)
composes 2 [R|t] transformations together.
|
void |
opencv_contrib.SelfSimDescriptor.compute(opencv_core.Mat img,
float[] descriptors) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
float[] descriptors) |
void |
opencv_contrib.SelfSimDescriptor.compute(opencv_core.Mat img,
float[] descriptors,
opencv_core.Size winStride,
opencv_core.Point locations) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
float[] descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.Point locations) |
void |
opencv_contrib.SelfSimDescriptor.compute(opencv_core.Mat img,
FloatBuffer descriptors) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
FloatBuffer descriptors) |
void |
opencv_contrib.SelfSimDescriptor.compute(opencv_core.Mat img,
FloatBuffer descriptors,
opencv_core.Size winStride,
opencv_core.Point locations) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
FloatBuffer descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.Point locations) |
void |
opencv_contrib.SelfSimDescriptor.compute(opencv_core.Mat img,
FloatPointer descriptors) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
FloatPointer descriptors) |
void |
opencv_contrib.SelfSimDescriptor.compute(opencv_core.Mat img,
FloatPointer descriptors,
opencv_core.Size winStride,
opencv_core.Point locations) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
FloatPointer descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.Point locations) |
static void |
opencv_core.SVD.compute(opencv_core.Mat src,
opencv_core.Mat w) |
static void |
opencv_core.SVD.compute(opencv_core.Mat src,
opencv_core.Mat w,
int flags)
computes singular values of a matrix
|
void |
opencv_contrib.StereoVar.compute(opencv_core.Mat left,
opencv_core.Mat right,
opencv_core.Mat disp)
the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
|
void |
opencv_calib3d.StereoBM.compute(opencv_core.Mat left,
opencv_core.Mat right,
opencv_core.Mat disparity) |
void |
opencv_calib3d.StereoSGBM.compute(opencv_core.Mat left,
opencv_core.Mat right,
opencv_core.Mat disp)
the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
|
void |
opencv_calib3d.StereoBM.compute(opencv_core.Mat left,
opencv_core.Mat right,
opencv_core.Mat disparity,
int disptype)
the stereo correspondence operator.
|
static void |
opencv_core.SVD.compute(opencv_core.Mat src,
opencv_core.Mat w,
opencv_core.Mat u,
opencv_core.Mat vt) |
static void |
opencv_core.SVD.compute(opencv_core.Mat src,
opencv_core.Mat w,
opencv_core.Mat u,
opencv_core.Mat vt,
int flags)
decomposes matrix and stores the results to user-provided matrices
|
void |
opencv_features2d.DescriptorExtractor.compute(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat descriptors) |
void |
opencv_features2d.Feature2D.compute(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat descriptors) |
void |
opencv_features2d.BOWImgDescriptorExtractor.compute(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat imgDescriptor) |
void |
opencv_features2d.BOWImgDescriptorExtractor.compute(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat imgDescriptor,
opencv_core.IntVectorVector pointIdxsOfClusters,
opencv_core.Mat descriptors) |
void |
opencv_contrib.LDA.compute(opencv_core.MatVector src,
opencv_core.Mat labels)
Compute the discriminants for data in src and labels.
|
static void |
opencv_calib3d.computeCorrespondEpilines(opencv_core.Mat points,
int whichImage,
opencv_core.Mat F,
opencv_core.Mat lines)
finds coordinates of epipolar lines corresponding the specified points
|
void |
opencv_objdetect.HOGDescriptor.computeGradient(opencv_core.Mat img,
opencv_core.Mat grad,
opencv_core.Mat angleOfs) |
void |
opencv_objdetect.HOGDescriptor.computeGradient(opencv_core.Mat img,
opencv_core.Mat grad,
opencv_core.Mat angleOfs,
opencv_core.Size paddingTL,
opencv_core.Size paddingBR) |
void |
opencv_contrib.SelfSimDescriptor.computeLogPolarMapping(opencv_core.Mat mappingMask) |
opencv_core.PCA |
opencv_core.PCA.computeVar(opencv_core.Mat data,
opencv_core.Mat mean,
int flags,
double retainedVariance) |
static double |
opencv_imgproc.contourArea(opencv_core.Mat contour) |
static double |
opencv_imgproc.contourArea(opencv_core.Mat contour,
boolean oriented)
computes the contour area
|
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.controlMatrix(opencv_core.Mat controlMatrix) |
static void |
opencv_imgproc.convertMaps(opencv_core.Mat map1,
opencv_core.Mat map2,
opencv_core.Mat dstmap1,
opencv_core.Mat dstmap2,
int dstmap1type) |
static void |
opencv_imgproc.convertMaps(opencv_core.Mat map1,
opencv_core.Mat map2,
opencv_core.Mat dstmap1,
opencv_core.Mat dstmap2,
int dstmap1type,
boolean nninterpolation)
converts maps for remap from floating-point to fixed-point format or backwards
|
static void |
opencv_calib3d.convertPointsFromHomogeneous(opencv_core.Mat src,
opencv_core.Mat dst)
converts point coordinates from homogeneous to normal pixel coordinates ((x,y,z)->(x/z, y/z))
|
static void |
opencv_calib3d.convertPointsHomogeneous(opencv_core.Mat src,
opencv_core.Mat dst)
for backward compatibility
|
static void |
opencv_calib3d.convertPointsToHomogeneous(opencv_core.Mat src,
opencv_core.Mat dst)
converts point coordinates from normal pixel coordinates to homogeneous coordinates ((x,y)->(x,y,1))
|
static void |
opencv_core.convertScaleAbs(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.convertScaleAbs(opencv_core.Mat src,
opencv_core.Mat dst,
double alpha,
double beta)
scales array elements, computes absolute values and converts the results to 8-bit unsigned integers: dst(i) = saturate_cast&lt;uchar&gt;(abs(src(i)*alpha + beta))
|
void |
opencv_core.Mat.convertTo(opencv_core.Mat m,
int rtype) |
void |
opencv_core.SparseMat.convertTo(opencv_core.Mat m,
int rtype) |
void |
opencv_core.Mat.convertTo(opencv_core.Mat m,
int rtype,
double alpha,
double beta)
converts matrix to another datatype with optional scaling.
|
void |
opencv_core.SparseMat.convertTo(opencv_core.Mat m,
int rtype,
double alpha,
double beta)
converts sparse matrix to dense n-dim matrix with optional type conversion and scaling.
|
static void |
opencv_imgproc.convexHull(opencv_core.Mat points,
opencv_core.Mat hull) |
static void |
opencv_imgproc.convexHull(opencv_core.Mat points,
opencv_core.Mat hull,
boolean clockwise,
boolean returnPoints)
computes convex hull for a set of 2D points.
|
static void |
opencv_imgproc.convexityDefects(opencv_core.Mat contour,
opencv_core.Mat convexhull,
opencv_core.Mat convexityDefects)
computes the contour convexity defects
|
static void |
opencv_imgproc.copyMakeBorder(opencv_core.Mat src,
opencv_core.Mat dst,
int top,
int bottom,
int left,
int right,
int borderType) |
static void |
opencv_imgproc.copyMakeBorder(opencv_core.Mat src,
opencv_core.Mat dst,
int top,
int bottom,
int left,
int right,
int borderType,
opencv_core.Scalar value)
copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode
|
void |
opencv_core.Mat.copySize(opencv_core.Mat m)
internal use function; properly re-allocates _size, _step arrays
|
void |
opencv_core.Mat.copyTo(opencv_core.Mat m)
copies the matrix content to "m".
|
void |
opencv_core.SparseMat.copyTo(opencv_core.Mat m)
converts sparse matrix to dense matrix.
|
void |
opencv_core.Mat.copyTo(opencv_core.Mat m,
opencv_core.Mat mask)
copies those matrix elements to "m" that are marked with non-zero mask elements.
|
static void |
opencv_imgproc.cornerEigenValsAndVecs(opencv_core.Mat src,
opencv_core.Mat dst,
int blockSize,
int ksize) |
static void |
opencv_imgproc.cornerEigenValsAndVecs(opencv_core.Mat src,
opencv_core.Mat dst,
int blockSize,
int ksize,
int borderType)
computes both eigenvalues and the eigenvectors of 2x2 derivative covariation matrix at each pixel.
|
static void |
opencv_imgproc.cornerHarris(opencv_core.Mat src,
opencv_core.Mat dst,
int blockSize,
int ksize,
double k) |
static void |
opencv_imgproc.cornerHarris(opencv_core.Mat src,
opencv_core.Mat dst,
int blockSize,
int ksize,
double k,
int borderType)
computes Harris cornerness criteria at each image pixel
|
static void |
opencv_imgproc.cornerMinEigenVal(opencv_core.Mat src,
opencv_core.Mat dst,
int blockSize) |
static void |
opencv_imgproc.cornerMinEigenVal(opencv_core.Mat src,
opencv_core.Mat dst,
int blockSize,
int ksize,
int borderType)
computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria
|
static void |
opencv_imgproc.cornerSubPix(opencv_core.Mat image,
opencv_core.Mat corners,
opencv_core.Size winSize,
opencv_core.Size zeroZone,
opencv_core.TermCriteria criteria)
adjusts the corner locations with sub-pixel accuracy to maximize the certain cornerness criteria
|
opencv_core.Mat |
opencv_video.KalmanFilter.correct(opencv_core.Mat measurement)
updates the predicted state from the measurement
|
static void |
opencv_calib3d.correctMatches(opencv_core.Mat F,
opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat newPoints1,
opencv_core.Mat newPoints2) |
static int |
opencv_core.countNonZero(opencv_core.Mat src)
computes the number of nonzero array elements
|
void |
opencv_ml.CvANN_MLP.create(opencv_core.Mat layerSizes) |
void |
opencv_ml.CvANN_MLP.create(opencv_core.Mat layerSizes,
int activateFunc,
double fparam1,
double fparam2) |
static void |
opencv_imgproc.createHanningWindow(opencv_core.Mat dst,
opencv_core.Size winSize,
int type) |
static void |
opencv_stitching.createLaplacePyr(opencv_core.Mat img,
int num_levels,
opencv_core.MatVector pyr) |
static void |
opencv_stitching.createLaplacePyrGpu(opencv_core.Mat img,
int num_levels,
opencv_core.MatVector pyr) |
static opencv_imgproc.FilterEngine |
opencv_imgproc.createLinearFilter(int srcType,
int dstType,
opencv_core.Mat kernel) |
static opencv_imgproc.FilterEngine |
opencv_imgproc.createLinearFilter(int srcType,
int dstType,
opencv_core.Mat kernel,
opencv_core.Point _anchor,
double delta,
int rowBorderType,
int columnBorderType,
opencv_core.Scalar borderValue)
returns the non-separable linear filter engine
|
static opencv_imgproc.FilterEngine |
opencv_imgproc.createMorphologyFilter(int op,
int type,
opencv_core.Mat kernel) |
static opencv_imgproc.FilterEngine |
opencv_imgproc.createMorphologyFilter(int op,
int type,
opencv_core.Mat kernel,
opencv_core.Point anchor,
int rowBorderType,
int columnBorderType,
opencv_core.Scalar borderValue)
returns morphological filter engine.
|
static opencv_imgproc.FilterEngine |
opencv_imgproc.createSeparableLinearFilter(int srcType,
int dstType,
opencv_core.Mat rowKernel,
opencv_core.Mat columnKernel) |
static opencv_imgproc.FilterEngine |
opencv_imgproc.createSeparableLinearFilter(int srcType,
int dstType,
opencv_core.Mat rowKernel,
opencv_core.Mat columnKernel,
opencv_core.Point anchor,
double delta,
int rowBorderType,
int columnBorderType,
opencv_core.Scalar borderValue)
returns the separable linear filter engine
|
static void |
opencv_stitching.createWeightMap(opencv_core.Mat mask,
float sharpness,
opencv_core.Mat weight) |
opencv_core.Mat |
opencv_core.Mat.cross(opencv_core.Mat m)
computes cross-product of 2 3D vectors
|
static void |
opencv_imgproc.cvtColor(opencv_core.Mat src,
opencv_core.Mat dst,
int code) |
static void |
opencv_imgproc.cvtColor(opencv_core.Mat src,
opencv_core.Mat dst,
int code,
int dstCn)
converts image from one color space to another
|
static void |
opencv_core.dct(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.dct(opencv_core.Mat src,
opencv_core.Mat dst,
int flags)
performs forward or inverse 1D or 2D Discrete Cosine Transformation
|
void |
opencv_videostab.DeblurerBase.deblur(int idx,
opencv_core.Mat frame) |
void |
opencv_videostab.NullDeblurer.deblur(int arg0,
opencv_core.Mat arg1) |
void |
opencv_videostab.WeightingDeblurer.deblur(int idx,
opencv_core.Mat frame) |
static void |
opencv_calib3d.decomposeProjectionMatrix(opencv_core.Mat projMatrix,
opencv_core.Mat cameraMatrix,
opencv_core.Mat rotMatrix,
opencv_core.Mat transVect) |
static void |
opencv_calib3d.decomposeProjectionMatrix(opencv_core.Mat projMatrix,
opencv_core.Mat cameraMatrix,
opencv_core.Mat rotMatrix,
opencv_core.Mat transVect,
opencv_core.Mat rotMatrixX,
opencv_core.Mat rotMatrixY,
opencv_core.Mat rotMatrixZ,
opencv_core.Mat eulerAngles)
Decomposes the projection matrix into camera matrix and the rotation matrix and the translation vector
|
opencv_stitching.ImageFeatures |
opencv_stitching.ImageFeatures.descriptors(opencv_core.Mat descriptors) |
void |
opencv_imgproc.GeneralizedHough.detect(opencv_core.Mat image,
opencv_core.Mat positions) |
void |
opencv_imgproc.GeneralizedHough.detect(opencv_core.Mat image,
opencv_core.Mat positions,
opencv_core.Mat votes,
int cannyThreshold)
find template on image
|
void |
opencv_imgproc.GeneralizedHough.detect(opencv_core.Mat edges,
opencv_core.Mat dx,
opencv_core.Mat dy,
opencv_core.Mat positions) |
void |
opencv_imgproc.GeneralizedHough.detect(opencv_core.Mat edges,
opencv_core.Mat dx,
opencv_core.Mat dy,
opencv_core.Mat positions,
opencv_core.Mat votes) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations,
double[] weights) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations,
double[] weights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.Point searchLocations) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations,
DoubleBuffer weights) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations,
DoubleBuffer weights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.Point searchLocations) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.Point searchLocations) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations,
DoublePointer weights) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations,
DoublePointer weights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.Point searchLocations) |
void |
opencv_features2d.MSER.detect(opencv_core.Mat image,
opencv_core.PointVectorVector msers) |
void |
opencv_features2d.MSER.detect(opencv_core.Mat image,
opencv_core.PointVectorVector msers,
opencv_core.Mat mask)
the operator that extracts the MSERs from the image or the specific part of it
|
void |
opencv_features2d.FeatureDetector.detect(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints) |
void |
opencv_features2d.StarDetector.detect(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints)
finds the keypoints in the image
|
void |
opencv_features2d.FeatureDetector.detect(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat mask) |
void |
opencv_objdetect.LatentSvmDetector.detect(opencv_core.Mat image,
opencv_objdetect.LatentSvmDetector.ObjectDetection objectDetections) |
void |
opencv_objdetect.LatentSvmDetector.detect(opencv_core.Mat image,
opencv_objdetect.LatentSvmDetector.ObjectDetection objectDetections,
float overlapThreshold,
int numThreads) |
void |
opencv_features2d.Feature2D.detectAndCompute(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat descriptors) |
void |
opencv_features2d.Feature2D.detectAndCompute(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat descriptors,
boolean useProvidedKeypoints) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.Rect objects) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.Rect foundLocations) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.Rect foundLocations,
double[] foundWeights) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.Rect foundLocations,
double[] foundWeights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.Rect foundLocations,
DoubleBuffer foundWeights) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.Rect foundLocations,
DoubleBuffer foundWeights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.Rect objects,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.Rect foundLocations,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.Rect foundLocations,
DoublePointer foundWeights) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.Rect foundLocations,
DoublePointer foundWeights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.Rect objects,
int[] rejectLevels,
double[] levelWeights) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.Rect objects,
int[] rejectLevels,
double[] levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.Rect objects,
IntBuffer rejectLevels,
DoubleBuffer levelWeights) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.Rect objects,
IntBuffer rejectLevels,
DoubleBuffer levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.Rect objects,
IntPointer rejectLevels,
DoublePointer levelWeights) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.Rect objects,
IntPointer rejectLevels,
DoublePointer levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScaleROI(opencv_core.Mat img,
opencv_core.Rect foundLocations,
opencv_objdetect.DetectionROI locations) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScaleROI(opencv_core.Mat img,
opencv_core.Rect foundLocations,
opencv_objdetect.DetectionROI locations,
double hitThreshold,
int groupThreshold) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.Point locations,
opencv_core.Point foundLocations,
double[] confidences) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.Point locations,
opencv_core.Point foundLocations,
double[] confidences,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.Point locations,
opencv_core.Point foundLocations,
DoubleBuffer confidences) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.Point locations,
opencv_core.Point foundLocations,
DoubleBuffer confidences,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.Point locations,
opencv_core.Point foundLocations,
DoublePointer confidences) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.Point locations,
opencv_core.Point foundLocations,
DoublePointer confidences,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding) |
static double |
opencv_core.determinant(opencv_core.Mat mtx)
computes determinant of a square matrix
|
static void |
opencv_core.dft(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.dft(opencv_core.Mat src,
opencv_core.Mat dst,
int flags,
int nonzeroRows)
performs forward or inverse 1D or 2D Discrete Fourier Transformation
|
static opencv_core.Mat |
opencv_core.Mat.diag(opencv_core.Mat d)
constructs a square diagonal matrix which main diagonal is vector "d"
|
static void |
opencv_imgproc.dilate(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat kernel) |
static void |
opencv_imgproc.dilate(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat kernel,
opencv_core.Point anchor,
int iterations,
int borderType,
opencv_core.Scalar borderValue)
dilates the image (applies the local maximum operator)
|
opencv_contrib.CvFeatureTracker |
opencv_contrib.CvFeatureTracker.disp_matches(opencv_core.Mat disp_matches) |
static void |
opencv_imgproc.distanceTransform(opencv_core.Mat src,
opencv_core.Mat dst,
int distanceType,
int maskSize)
computes the distance transform map
|
static void |
opencv_imgproc.distanceTransformWithLabels(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat labels,
int distanceType,
int maskSize) |
static void |
opencv_imgproc.distanceTransformWithLabels(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat labels,
int distanceType,
int maskSize,
int labelType)
builds the discrete Voronoi diagram
|
static void |
opencv_core.divide(double scale,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.divide(double scale,
opencv_core.Mat src2,
opencv_core.Mat dst,
int dtype)
computes element-wise weighted reciprocal of an array (dst = scale/src2)
|
static void |
opencv_core.divide(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.divide(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
double scale,
int dtype)
computes element-wise weighted quotient of the two arrays (dst = scale*src1/src2)
|
double |
opencv_core.Mat.dot(opencv_core.Mat m)
computes dot-product
|
static void |
opencv_calib3d.drawChessboardCorners(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat corners,
boolean patternWasFound)
draws the checkerboard pattern (found or partly found) in the image
|
static void |
opencv_imgproc.drawContours(opencv_core.Mat image,
opencv_core.MatVector contours,
int contourIdx,
opencv_core.Scalar color) |
static void |
opencv_imgproc.drawContours(opencv_core.Mat image,
opencv_core.MatVector contours,
int contourIdx,
opencv_core.Scalar color,
int thickness,
int lineType,
opencv_core.Mat hierarchy,
int maxLevel,
opencv_core.Point offset)
draws contours in the image
|
static void |
opencv_objdetect.drawDataMatrixCodes(opencv_core.Mat image,
opencv_core.StringVector codes,
opencv_core.Mat corners) |
static void |
opencv_features2d.drawKeypoints(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat outImage) |
static void |
opencv_features2d.drawKeypoints(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints,
opencv_core.Mat outImage,
opencv_core.Scalar color,
int flags) |
static void |
opencv_features2d.drawMatches(opencv_core.Mat img1,
opencv_features2d.KeyPoint keypoints1,
opencv_core.Mat img2,
opencv_features2d.KeyPoint keypoints2,
opencv_features2d.DMatch matches1to2,
opencv_core.Mat outImg) |
static void |
opencv_features2d.drawMatches(opencv_core.Mat img1,
opencv_features2d.KeyPoint keypoints1,
opencv_core.Mat img2,
opencv_features2d.KeyPoint keypoints2,
opencv_features2d.DMatch matches1to2,
opencv_core.Mat outImg,
opencv_core.Scalar matchColor,
opencv_core.Scalar singlePointColor,
byte[] matchesMask,
int flags) |
static void |
opencv_features2d.drawMatches(opencv_core.Mat img1,
opencv_features2d.KeyPoint keypoints1,
opencv_core.Mat img2,
opencv_features2d.KeyPoint keypoints2,
opencv_features2d.DMatch matches1to2,
opencv_core.Mat outImg,
opencv_core.Scalar matchColor,
opencv_core.Scalar singlePointColor,
ByteBuffer matchesMask,
int flags) |
static void |
opencv_features2d.drawMatches(opencv_core.Mat img1,
opencv_features2d.KeyPoint keypoints1,
opencv_core.Mat img2,
opencv_features2d.KeyPoint keypoints2,
opencv_features2d.DMatch matches1to2,
opencv_core.Mat outImg,
opencv_core.Scalar matchColor,
opencv_core.Scalar singlePointColor,
BytePointer matchesMask,
int flags) |
static void |
opencv_features2d.drawMatches(opencv_core.Mat img1,
opencv_features2d.KeyPoint keypoints1,
opencv_core.Mat img2,
opencv_features2d.KeyPoint keypoints2,
opencv_features2d.DMatchVectorVector matches1to2,
opencv_core.Mat outImg) |
static void |
opencv_features2d.drawMatches(opencv_core.Mat img1,
opencv_features2d.KeyPoint keypoints1,
opencv_core.Mat img2,
opencv_features2d.KeyPoint keypoints2,
opencv_features2d.DMatchVectorVector matches1to2,
opencv_core.Mat outImg,
opencv_core.Scalar matchColor,
opencv_core.Scalar singlePointColor,
opencv_core.ByteVectorVector matchesMask,
int flags) |
static boolean |
opencv_core.eigen(opencv_core.Mat src,
boolean computeEigenvectors,
opencv_core.Mat eigenvalues,
opencv_core.Mat eigenvectors) |
static boolean |
opencv_core.eigen(opencv_core.Mat src,
opencv_core.Mat eigenvalues) |
static boolean |
opencv_core.eigen(opencv_core.Mat src,
opencv_core.Mat eigenvalues,
int lowindex,
int highindex)
finds eigenvalues of a symmetric matrix
|
static boolean |
opencv_core.eigen(opencv_core.Mat src,
opencv_core.Mat eigenvalues,
opencv_core.Mat eigenvectors) |
static boolean |
opencv_core.eigen(opencv_core.Mat src,
opencv_core.Mat eigenvalues,
opencv_core.Mat eigenvectors,
int lowindex,
int highindex)
finds eigenvalues and eigenvectors of a symmetric matrix
|
opencv_core.PCA |
opencv_core.PCA.eigenvalues(opencv_core.Mat eigenvalues) |
opencv_core.PCA |
opencv_core.PCA.eigenvectors(opencv_core.Mat eigenvectors) |
static void |
opencv_core.ellipse(opencv_core.Mat img,
opencv_core.Point center,
opencv_core.Size axes,
double angle,
double startAngle,
double endAngle,
opencv_core.Scalar color) |
static void |
opencv_core.ellipse(opencv_core.Mat img,
opencv_core.Point center,
opencv_core.Size axes,
double angle,
double startAngle,
double endAngle,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
draws an elliptic arc, ellipse sector or a rotated ellipse in the image
|
static void |
opencv_core.ellipse(opencv_core.Mat img,
opencv_core.RotatedRect box,
opencv_core.Scalar color) |
static void |
opencv_core.ellipse(opencv_core.Mat img,
opencv_core.RotatedRect box,
opencv_core.Scalar color,
int thickness,
int lineType)
draws a rotated ellipse in the image
|
static float |
opencv_imgproc.EMD(opencv_core.Mat signature1,
opencv_core.Mat signature2,
int distType) |
static float |
opencv_imgproc.EMD(opencv_core.Mat signature1,
opencv_core.Mat signature2,
int distType,
opencv_core.Mat cost,
float[] lowerBound,
opencv_core.Mat flow) |
static float |
opencv_imgproc.EMD(opencv_core.Mat signature1,
opencv_core.Mat signature2,
int distType,
opencv_core.Mat cost,
FloatBuffer lowerBound,
opencv_core.Mat flow) |
static float |
opencv_imgproc.EMD(opencv_core.Mat signature1,
opencv_core.Mat signature2,
int distType,
opencv_core.Mat cost,
FloatPointer lowerBound,
opencv_core.Mat flow) |
static opencv_core.Mat |
opencv_videostab.ensureInclusionConstraint(opencv_core.Mat M,
opencv_core.Size size,
float trimRatio) |
static void |
opencv_imgproc.equalizeHist(opencv_core.Mat src,
opencv_core.Mat dst)
normalizes the grayscale image brightness and contrast by normalizing its histogram
|
static void |
opencv_imgproc.erode(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat kernel) |
static void |
opencv_imgproc.erode(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat kernel,
opencv_core.Point anchor,
int iterations,
int borderType,
opencv_core.Scalar borderValue)
erodes the image (applies the local minimum operator)
|
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.errorCovPost(opencv_core.Mat errorCovPost) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.errorCovPre(opencv_core.Mat errorCovPre) |
opencv_core.Mat |
opencv_videostab.IGlobalMotionEstimator.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1) |
opencv_core.Mat |
opencv_videostab.PyrLkRobustMotionEstimator.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1) |
static int |
opencv_calib3d.estimateAffine3D(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat out,
opencv_core.Mat inliers) |
static int |
opencv_calib3d.estimateAffine3D(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat out,
opencv_core.Mat inliers,
double ransacThreshold,
double confidence) |
static float |
opencv_videostab.estimateOptimalTrimRatio(opencv_core.Mat M,
opencv_core.Size size) |
static opencv_core.Mat |
opencv_video.estimateRigidTransform(opencv_core.Mat src,
opencv_core.Mat dst,
boolean fullAffine)
estimates the best-fit Euclidean, similarity, affine or perspective transformation
// that maps one 2D point set to another or one image to another.
|
int |
opencv_stitching.Stitcher.estimateTransform(opencv_core.Mat images) |
int |
opencv_stitching.Stitcher.estimateTransform(opencv_core.Mat images,
opencv_core.RectVectorVector rois) |
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_features2d.KeyPoint keypoints1,
opencv_features2d.KeyPoint keypoints2,
float[] repeatability,
int[] correspCount) |
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_features2d.KeyPoint keypoints1,
opencv_features2d.KeyPoint keypoints2,
float[] repeatability,
int[] correspCount,
opencv_features2d.FeatureDetector fdetector) |
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_features2d.KeyPoint keypoints1,
opencv_features2d.KeyPoint keypoints2,
FloatBuffer repeatability,
IntBuffer correspCount) |
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_features2d.KeyPoint keypoints1,
opencv_features2d.KeyPoint keypoints2,
FloatBuffer repeatability,
IntBuffer correspCount,
opencv_features2d.FeatureDetector fdetector) |
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_features2d.KeyPoint keypoints1,
opencv_features2d.KeyPoint keypoints2,
FloatPointer repeatability,
IntPointer correspCount) |
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_features2d.KeyPoint keypoints1,
opencv_features2d.KeyPoint keypoints2,
FloatPointer repeatability,
IntPointer correspCount,
opencv_features2d.FeatureDetector fdetector)
\
Functions to evaluate the feature detectors and [generic] descriptor extractors *
\
|
static void |
opencv_features2d.evaluateGenericDescriptorMatcher(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_features2d.KeyPoint keypoints1,
opencv_features2d.KeyPoint keypoints2,
opencv_features2d.DMatchVectorVector matches1to2,
opencv_core.ByteVectorVector correctMatches1to2Mask,
opencv_core.Point2f recallPrecisionCurve) |
static void |
opencv_features2d.evaluateGenericDescriptorMatcher(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_features2d.KeyPoint keypoints1,
opencv_features2d.KeyPoint keypoints2,
opencv_features2d.DMatchVectorVector matches1to2,
opencv_core.ByteVectorVector correctMatches1to2Mask,
opencv_core.Point2f recallPrecisionCurve,
opencv_features2d.GenericDescriptorMatcher dmatch) |
static void |
opencv_core.exp(opencv_core.Mat src,
opencv_core.Mat dst)
computes exponent of each matrix element (dst = e**src)
|
static void |
opencv_core.extractChannel(opencv_core.Mat src,
opencv_core.Mat dst,
int coi)
extracts a single channel from src (coi is 0-based index)
|
static void |
opencv_core.extractImageCOI(opencv_core.CvArr arr,
opencv_core.Mat coiimg) |
static void |
opencv_core.extractImageCOI(opencv_core.CvArr arr,
opencv_core.Mat coiimg,
int coi)
extracts Channel of Interest from CvMat or IplImage and makes cv::Mat out of it.
|
static void |
opencv_features2d.FAST(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints,
int threshold) |
static void |
opencv_features2d.FAST(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints,
int threshold,
boolean nonmaxSuppression)
detects corners using FAST algorithm by E.
|
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.Mat src,
opencv_core.Mat dst,
float h,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColored(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_photo.fastNlMeansDenoisingColored(opencv_core.Mat src,
opencv_core.Mat dst,
float h,
float hColor,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(opencv_core.MatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(opencv_core.MatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
float hColor,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.MatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.MatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_features2d.FASTX(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints,
int threshold,
boolean nonmaxSuppression,
int type) |
void |
opencv_stitching.Blender.feed(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Point tl) |
void |
opencv_stitching.FeatherBlender.feed(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Point tl) |
void |
opencv_stitching.MultiBandBlender.feed(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Point tl) |
void |
opencv_core.RNG.fill(opencv_core.Mat mat,
int distType,
opencv_core.Mat a,
opencv_core.Mat b) |
void |
opencv_core.RNG.fill(opencv_core.Mat mat,
int distType,
opencv_core.Mat a,
opencv_core.Mat b,
boolean saturateRange) |
static void |
opencv_core.fillConvexPoly(opencv_core.Mat img,
opencv_core.Mat points,
opencv_core.Scalar color) |
static void |
opencv_core.fillConvexPoly(opencv_core.Mat img,
opencv_core.Mat points,
opencv_core.Scalar color,
int lineType,
int shift) |
static void |
opencv_core.fillConvexPoly(opencv_core.Mat img,
opencv_core.Point pts,
int npts,
opencv_core.Scalar color) |
static void |
opencv_core.fillConvexPoly(opencv_core.Mat img,
opencv_core.Point pts,
int npts,
opencv_core.Scalar color,
int lineType,
int shift)
draws a filled convex polygon in the image
|
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.MatVector pts,
opencv_core.Scalar color) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.MatVector pts,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
int[] npts,
int ncontours,
opencv_core.Scalar color) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
int[] npts,
int ncontours,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
IntBuffer npts,
int ncontours,
opencv_core.Scalar color) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
IntBuffer npts,
int ncontours,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
IntPointer npts,
int ncontours,
opencv_core.Scalar color) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
IntPointer npts,
int ncontours,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
PointerPointer pts,
IntPointer npts,
int ncontours,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset)
fills an area bounded by one or more polygons
|
static void |
opencv_imgproc.filter2D(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Mat kernel) |
static void |
opencv_imgproc.filter2D(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Mat kernel,
opencv_core.Point anchor,
double delta,
int borderType)
applies non-separable 2D linear filter to the image
|
static void |
opencv_calib3d.filterSpeckles(opencv_core.Mat img,
double newVal,
int maxSpeckleSize,
double maxDiff) |
static void |
opencv_calib3d.filterSpeckles(opencv_core.Mat img,
double newVal,
int maxSpeckleSize,
double maxDiff,
opencv_core.Mat buf)
filters off speckles (small regions of incorrectly computed disparity)
|
float |
opencv_ml.CvKNearest.find_nearest(opencv_core.Mat samples,
int k) |
float |
opencv_ml.CvKNearest.find_nearest(opencv_core.Mat samples,
int k,
opencv_core.Mat results,
float[] neighbors,
opencv_core.Mat neighborResponses,
opencv_core.Mat dist) |
float |
opencv_ml.CvKNearest.find_nearest(opencv_core.Mat samples,
int k,
opencv_core.Mat results,
FloatBuffer neighbors,
opencv_core.Mat neighborResponses,
opencv_core.Mat dist) |
float |
opencv_ml.CvKNearest.find_nearest(opencv_core.Mat samples,
int k,
opencv_core.Mat results,
FloatPointer neighbors,
opencv_core.Mat neighborResponses,
opencv_core.Mat dist) |
float |
opencv_ml.CvKNearest.find_nearest(opencv_core.Mat samples,
int k,
opencv_core.Mat results,
opencv_core.Mat neighborResponses,
opencv_core.Mat dists) |
float |
opencv_ml.CvKNearest.find_nearest(opencv_core.Mat samples,
int k,
opencv_core.Mat results,
PointerPointer neighbors,
opencv_core.Mat neighborResponses,
opencv_core.Mat dist) |
static boolean |
opencv_calib3d.find4QuadCornerSubpix(opencv_core.Mat img,
opencv_core.Mat corners,
opencv_core.Size region_size)
finds subpixel-accurate positions of the chessboard corners
|
static boolean |
opencv_calib3d.findChessboardCorners(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat corners) |
static boolean |
opencv_calib3d.findChessboardCorners(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat corners,
int flags)
finds checkerboard pattern of the specified size in the image
|
static boolean |
opencv_calib3d.findCirclesGrid(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat centers) |
static boolean |
opencv_calib3d.findCirclesGrid(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat centers,
int flags,
opencv_features2d.FeatureDetector blobDetector)
finds circles' grid pattern of the specified size in the image
|
static boolean |
opencv_calib3d.findCirclesGridDefault(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat centers) |
static boolean |
opencv_calib3d.findCirclesGridDefault(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat centers,
int flags)
the deprecated function.
|
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.MatVector contours,
int mode,
int method) |
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.MatVector contours,
int mode,
int method,
opencv_core.Point offset)
retrieves contours from black-n-white image.
|
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.MatVector contours,
opencv_core.Mat hierarchy,
int mode,
int method) |
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.MatVector contours,
opencv_core.Mat hierarchy,
int mode,
int method,
opencv_core.Point offset)
retrieves contours and the hierarchical information from black-n-white image.
|
static void |
opencv_objdetect.findDataMatrix(opencv_core.Mat image,
opencv_core.StringVector codes) |
static void |
opencv_objdetect.findDataMatrix(opencv_core.Mat image,
opencv_core.StringVector codes,
opencv_core.Mat corners,
opencv_core.MatVector dmtx) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2,
int method,
double param1,
double param2,
opencv_core.Mat mask)
finds fundamental matrix from a set of corresponding 2D points
|
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat mask) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat mask,
int method,
double param1,
double param2)
variant of findFundamentalMat for backward compatibility
|
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints,
int method,
double ransacReprojThreshold,
opencv_core.Mat mask)
computes the best-fit perspective transformation mapping srcPoints to dstPoints.
|
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints,
opencv_core.Mat mask) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints,
opencv_core.Mat mask,
int method,
double ransacReprojThreshold)
variant of findHomography for backward compatibility
|
int |
opencv_core.KDTree.findNearest(opencv_core.Mat vec,
int K,
int Emax,
opencv_core.Mat neighborsIdx) |
int |
opencv_core.KDTree.findNearest(opencv_core.Mat vec,
int K,
int Emax,
opencv_core.Mat neighborsIdx,
opencv_core.Mat neighbors,
opencv_core.Mat dist,
opencv_core.Mat labels)
finds the K nearest neighbors of "vec" while looking at Emax (at most) leaves
|
static void |
opencv_core.findNonZero(opencv_core.Mat src,
opencv_core.Mat idx)
returns the list of locations of non-zero pixels
|
void |
opencv_core.KDTree.findOrthoRange(opencv_core.Mat minBounds,
opencv_core.Mat maxBounds,
opencv_core.Mat neighborsIdx) |
void |
opencv_core.KDTree.findOrthoRange(opencv_core.Mat minBounds,
opencv_core.Mat maxBounds,
opencv_core.Mat neighborsIdx,
opencv_core.Mat neighbors,
opencv_core.Mat labels)
finds all the points from the initial set that belong to the specified box
|
opencv_stitching.MatBytePairVector |
opencv_stitching.MatBytePairVector.first(long i,
opencv_core.Mat first) |
static opencv_core.RotatedRect |
opencv_imgproc.fitEllipse(opencv_core.Mat points)
fits ellipse to the set of 2D points
|
static void |
opencv_imgproc.fitLine(opencv_core.Mat points,
opencv_core.Mat line,
int distType,
double param,
double reps,
double aeps)
fits line to the set of 2D points using M-estimator algorithm
|
static void |
opencv_core.flip(opencv_core.Mat src,
opencv_core.Mat dst,
int flipCode)
reverses the order of the rows, columns or both in a matrix
|
static int |
opencv_imgproc.floodFill(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal) |
static int |
opencv_imgproc.floodFill(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal,
opencv_core.Rect rect,
opencv_core.Scalar loDiff,
opencv_core.Scalar upDiff,
int flags)
fills the semi-uniform image region and/or the mask starting from the specified seed point
|
static int |
opencv_imgproc.floodFill(opencv_core.Mat image,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal) |
static int |
opencv_imgproc.floodFill(opencv_core.Mat image,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal,
opencv_core.Rect rect,
opencv_core.Scalar loDiff,
opencv_core.Scalar upDiff,
int flags)
fills the semi-uniform image region starting from the specified seed point
|
static void |
opencv_stitching.focalsFromHomography(opencv_core.Mat H,
double[] f0,
double[] f1,
BoolPointer f0_ok,
BoolPointer f1_ok) |
static void |
opencv_stitching.focalsFromHomography(opencv_core.Mat H,
DoubleBuffer f0,
DoubleBuffer f1,
BoolPointer f0_ok,
BoolPointer f1_ok) |
static void |
opencv_stitching.focalsFromHomography(opencv_core.Mat H,
DoublePointer f0,
DoublePointer f1,
BoolPointer f0_ok,
BoolPointer f1_ok) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.gain(opencv_core.Mat gain) |
static void |
opencv_imgproc.GaussianBlur(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize,
double sigmaX) |
static void |
opencv_imgproc.GaussianBlur(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize,
double sigmaX,
double sigmaY,
int borderType)
smooths the image using Gaussian filter.
|
static void |
opencv_core.gemm(opencv_core.Mat src1,
opencv_core.Mat src2,
double alpha,
opencv_core.Mat src3,
double gamma,
opencv_core.Mat dst) |
static void |
opencv_core.gemm(opencv_core.Mat src1,
opencv_core.Mat src2,
double alpha,
opencv_core.Mat src3,
double gamma,
opencv_core.Mat dst,
int flags)
implements generalized matrix product algorithm GEMM from BLAS
|
opencv_core.Mat |
opencv_objdetect.CascadeClassifier.MaskGenerator.generateMask(opencv_core.Mat src) |
void |
opencv_legacy.PatchGenerator.generateRandomTransform(opencv_core.Point2f srcCenter,
opencv_core.Point2f dstCenter,
opencv_core.Mat transform,
opencv_core.RNG rng) |
void |
opencv_legacy.PatchGenerator.generateRandomTransform(opencv_core.Point2f srcCenter,
opencv_core.Point2f dstCenter,
opencv_core.Mat transform,
opencv_core.RNG rng,
boolean inverse) |
static opencv_core.Mat |
opencv_imgproc.getAffineTransform(opencv_core.Mat src,
opencv_core.Mat dst) |
void |
opencv_video.BackgroundSubtractor.getBackgroundImage(opencv_core.Mat backgroundImage)
computes a background image
|
void |
opencv_video.BackgroundSubtractorMOG2.getBackgroundImage(opencv_core.Mat backgroundImage)
computes a background image which is the mean of all background Gaussians
|
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.Mat cameraMatrix) |
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Size imgsize,
boolean centerPrincipalPoint)
returns the default new camera matrix (by default it is the same as cameraMatrix unless centerPrincipalPoint=true)
|
static void |
opencv_imgproc.getDerivKernels(opencv_core.Mat kx,
opencv_core.Mat ky,
int dx,
int dy,
int ksize) |
static void |
opencv_imgproc.getDerivKernels(opencv_core.Mat kx,
opencv_core.Mat ky,
int dx,
int dy,
int ksize,
boolean normalize,
int ktype)
initializes kernels of the generalized Sobel operator
|
static int |
opencv_imgproc.getKernelType(opencv_core.Mat kernel,
opencv_core.Point anchor)
returns type (one of KERNEL_*) of 1D or 2D kernel specified by its coefficients.
|
static opencv_imgproc.BaseColumnFilter |
opencv_imgproc.getLinearColumnFilter(int bufType,
int dstType,
opencv_core.Mat kernel,
int anchor,
int symmetryType) |
static opencv_imgproc.BaseColumnFilter |
opencv_imgproc.getLinearColumnFilter(int bufType,
int dstType,
opencv_core.Mat kernel,
int anchor,
int symmetryType,
double delta,
int bits)
returns the primitive column filter with the specified kernel
|
static opencv_imgproc.BaseFilter |
opencv_imgproc.getLinearFilter(int srcType,
int dstType,
opencv_core.Mat kernel) |
static opencv_imgproc.BaseFilter |
opencv_imgproc.getLinearFilter(int srcType,
int dstType,
opencv_core.Mat kernel,
opencv_core.Point anchor,
double delta,
int bits)
returns 2D filter with the specified kernel
|
static opencv_imgproc.BaseRowFilter |
opencv_imgproc.getLinearRowFilter(int srcType,
int bufType,
opencv_core.Mat kernel,
int anchor,
int symmetryType)
returns the primitive row filter with the specified kernel
|
void |
opencv_contrib.Retina.getMagno(opencv_core.Mat retinaOutput_magno)
accessor of the motion channel of the retina (models peripheral vision)
|
static opencv_imgproc.BaseFilter |
opencv_imgproc.getMorphologyFilter(int op,
int type,
opencv_core.Mat kernel) |
static opencv_imgproc.BaseFilter |
opencv_imgproc.getMorphologyFilter(int op,
int type,
opencv_core.Mat kernel,
opencv_core.Point anchor)
returns 2D morphological filter
|
void |
opencv_legacy.LDetector.getMostStable2D(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints,
int maxCount,
opencv_legacy.PatchGenerator patchGenerator) |
static opencv_core.Mat |
opencv_videostab.getMotion(int from,
int to,
opencv_core.Mat motions,
int size) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
double alpha) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect validPixROI,
boolean centerPrincipalPoint)
returns the optimal new camera matrix
|
void |
opencv_contrib.Retina.getParvo(opencv_core.Mat retinaOutput_parvo)
accessor of the details channel of the retina (models foveal vision)
|
static opencv_core.Mat |
opencv_imgproc.getPerspectiveTransform(opencv_core.Mat src,
opencv_core.Mat dst) |
void |
opencv_core.KDTree.getPoints(opencv_core.Mat idx,
opencv_core.Mat pts) |
void |
opencv_core.KDTree.getPoints(opencv_core.Mat idx,
opencv_core.Mat pts,
opencv_core.Mat labels)
returns vectors with the specified indices
|
static void |
opencv_imgproc.getRectSubPix(opencv_core.Mat image,
opencv_core.Size patchSize,
opencv_core.Point2f center,
opencv_core.Mat patch) |
static void |
opencv_imgproc.getRectSubPix(opencv_core.Mat image,
opencv_core.Size patchSize,
opencv_core.Point2f center,
opencv_core.Mat patch,
int patchType)
extracts rectangle from the image at sub-pixel location
|
static void |
opencv_imgproc.goodFeaturesToTrack(opencv_core.Mat image,
opencv_core.Mat corners,
int maxCorners,
double qualityLevel,
double minDistance) |
static void |
opencv_imgproc.goodFeaturesToTrack(opencv_core.Mat image,
opencv_core.Mat corners,
int maxCorners,
double qualityLevel,
double minDistance,
opencv_core.Mat mask,
int blockSize,
boolean useHarrisDetector,
double k)
finds the strong enough corners where the cornerMinEigenVal() or cornerHarris() report the local maxima
|
static void |
opencv_imgproc.grabCut(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Rect rect,
opencv_core.Mat bgdModel,
opencv_core.Mat fgdModel,
int iterCount) |
static void |
opencv_imgproc.grabCut(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Rect rect,
opencv_core.Mat bgdModel,
opencv_core.Mat fgdModel,
int iterCount,
int mode)
segments the image using GrabCut algorithm
|
opencv_stitching.MatchesInfo |
opencv_stitching.MatchesInfo.H(opencv_core.Mat H) |
static void |
opencv_core.hconcat(opencv_core.Mat src,
long nsrc,
opencv_core.Mat dst) |
static void |
opencv_core.hconcat(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.hconcat(opencv_core.MatVector src,
opencv_core.Mat dst) |
static void |
opencv_imgproc.HoughCircles(opencv_core.Mat image,
opencv_core.Mat circles,
int method,
double dp,
double minDist) |
static void |
opencv_imgproc.HoughCircles(opencv_core.Mat image,
opencv_core.Mat circles,
int method,
double dp,
double minDist,
double param1,
double param2,
int minRadius,
int maxRadius)
finds circles in the grayscale image using 2+1 gradient Hough transform
|
static void |
opencv_imgproc.HoughLines(opencv_core.Mat image,
opencv_core.Mat lines,
double rho,
double theta,
int threshold) |
static void |
opencv_imgproc.HoughLines(opencv_core.Mat image,
opencv_core.Mat lines,
double rho,
double theta,
int threshold,
double srn,
double stn)
finds lines in the black-n-white image using the standard or pyramid Hough transform
|
static void |
opencv_imgproc.HoughLinesP(opencv_core.Mat image,
opencv_core.Mat lines,
double rho,
double theta,
int threshold) |
static void |
opencv_imgproc.HoughLinesP(opencv_core.Mat image,
opencv_core.Mat lines,
double rho,
double theta,
int threshold,
double minLineLength,
double maxLineGap)
finds line segments in the black-n-white image using probabilistic Hough transform
|
static void |
opencv_imgproc.HuMoments(opencv_imgproc.Moments m,
opencv_core.Mat hu) |
static void |
opencv_core.idct(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.idct(opencv_core.Mat src,
opencv_core.Mat dst,
int flags)
performs inverse 1D or 2D Discrete Cosine Transformation
|
static void |
opencv_core.idft(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.idft(opencv_core.Mat src,
opencv_core.Mat dst,
int flags,
int nonzeroRows)
performs inverse 1D or 2D Discrete Fourier Transformation
|
static opencv_core.Mat |
opencv_highgui.imdecode(opencv_core.Mat buf,
int flags) |
static opencv_core.Mat |
opencv_highgui.imdecode(opencv_core.Mat buf,
int flags,
opencv_core.Mat dst) |
static boolean |
opencv_highgui.imencode(BytePointer ext,
opencv_core.Mat img,
byte[] buf) |
static boolean |
opencv_highgui.imencode(BytePointer ext,
opencv_core.Mat img,
byte[] buf,
int[] params) |
static boolean |
opencv_highgui.imencode(BytePointer ext,
opencv_core.Mat img,
ByteBuffer buf) |
static boolean |
opencv_highgui.imencode(BytePointer ext,
opencv_core.Mat img,
ByteBuffer buf,
IntBuffer params) |
static boolean |
opencv_highgui.imencode(BytePointer ext,
opencv_core.Mat img,
BytePointer buf) |
static boolean |
opencv_highgui.imencode(BytePointer ext,
opencv_core.Mat img,
BytePointer buf,
IntPointer params) |
static boolean |
opencv_highgui.imencode(String ext,
opencv_core.Mat img,
byte[] buf) |
static boolean |
opencv_highgui.imencode(String ext,
opencv_core.Mat img,
byte[] buf,
int[] params) |
static boolean |
opencv_highgui.imencode(String ext,
opencv_core.Mat img,
ByteBuffer buf) |
static boolean |
opencv_highgui.imencode(String ext,
opencv_core.Mat img,
ByteBuffer buf,
IntBuffer params) |
static boolean |
opencv_highgui.imencode(String ext,
opencv_core.Mat img,
BytePointer buf) |
static boolean |
opencv_highgui.imencode(String ext,
opencv_core.Mat img,
BytePointer buf,
IntPointer params) |
static void |
opencv_highgui.imshow(BytePointer winname,
opencv_core.Mat mat) |
static void |
opencv_highgui.imshow(String winname,
opencv_core.Mat mat) |
static boolean |
opencv_highgui.imwrite(BytePointer filename,
opencv_core.Mat img) |
static boolean |
opencv_highgui.imwrite(BytePointer filename,
opencv_core.Mat img,
int[] params) |
static boolean |
opencv_highgui.imwrite(BytePointer filename,
opencv_core.Mat img,
IntBuffer params) |
static boolean |
opencv_highgui.imwrite(BytePointer filename,
opencv_core.Mat img,
IntPointer params) |
static boolean |
opencv_highgui.imwrite(String filename,
opencv_core.Mat img) |
static boolean |
opencv_highgui.imwrite(String filename,
opencv_core.Mat img,
int[] params) |
static boolean |
opencv_highgui.imwrite(String filename,
opencv_core.Mat img,
IntBuffer params) |
static boolean |
opencv_highgui.imwrite(String filename,
opencv_core.Mat img,
IntPointer params) |
void |
opencv_core.NAryMatIterator.init(opencv_core.Mat arrays,
opencv_core.Mat planes,
byte[] ptrs) |
void |
opencv_core.NAryMatIterator.init(opencv_core.Mat arrays,
opencv_core.Mat planes,
byte[] ptrs,
int narrays) |
void |
opencv_core.NAryMatIterator.init(opencv_core.Mat arrays,
opencv_core.Mat planes,
ByteBuffer ptrs) |
void |
opencv_core.NAryMatIterator.init(opencv_core.Mat arrays,
opencv_core.Mat planes,
ByteBuffer ptrs,
int narrays) |
void |
opencv_core.NAryMatIterator.init(opencv_core.Mat arrays,
opencv_core.Mat planes,
BytePointer ptrs) |
void |
opencv_core.NAryMatIterator.init(opencv_core.Mat arrays,
opencv_core.Mat planes,
BytePointer ptrs,
int narrays) |
void |
opencv_core.NAryMatIterator.init(PointerPointer arrays,
opencv_core.Mat planes,
PointerPointer ptrs,
int narrays)
the separate iterator initialization method
|
void |
opencv_objdetect.CascadeClassifier.MaskGenerator.initializeMask(opencv_core.Mat arg0) |
static void |
opencv_imgproc.initUndistortRectifyMap(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat R,
opencv_core.Mat newCameraMatrix,
opencv_core.Size size,
int m1type,
opencv_core.Mat map1,
opencv_core.Mat map2)
initializes maps for cv::remap() to correct lens distortion and optionally rectify the image
|
static float |
opencv_imgproc.initWideAngleProjMap(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
int destImageWidth,
int m1type,
opencv_core.Mat map1,
opencv_core.Mat map2) |
static float |
opencv_imgproc.initWideAngleProjMap(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
int destImageWidth,
int m1type,
opencv_core.Mat map1,
opencv_core.Mat map2,
int projType,
double alpha)
initializes maps for cv::remap() for wide-angle
|
void |
opencv_videostab.InpainterBase.inpaint(int idx,
opencv_core.Mat frame,
opencv_core.Mat mask) |
void |
opencv_videostab.NullInpainter.inpaint(int arg0,
opencv_core.Mat arg1,
opencv_core.Mat arg2) |
void |
opencv_videostab.InpaintingPipeline.inpaint(int idx,
opencv_core.Mat frame,
opencv_core.Mat mask) |
void |
opencv_videostab.ConsistentMosaicInpainter.inpaint(int idx,
opencv_core.Mat frame,
opencv_core.Mat mask) |
void |
opencv_videostab.MotionInpainter.inpaint(int idx,
opencv_core.Mat frame,
opencv_core.Mat mask) |
void |
opencv_videostab.ColorAverageInpainter.inpaint(int idx,
opencv_core.Mat frame,
opencv_core.Mat mask) |
void |
opencv_videostab.ColorInpainter.inpaint(int idx,
opencv_core.Mat frame,
opencv_core.Mat mask) |
static void |
opencv_photo.inpaint(opencv_core.Mat src,
opencv_core.Mat inpaintMask,
opencv_core.Mat dst,
double inpaintRadius,
int flags)
restores the damaged image areas using one of the available inpainting algorithms
|
static void |
opencv_core.inRange(opencv_core.Mat src,
opencv_core.Mat lowerb,
opencv_core.Mat upperb,
opencv_core.Mat dst)
set mask elements for those array elements which are within the element-specific bounding box (dst = lowerb <= src && src < upperb)
|
static void |
opencv_core.insertChannel(opencv_core.Mat src,
opencv_core.Mat dst,
int coi)
inserts a single channel to dst (coi is 0-based index)
|
static void |
opencv_core.insertImageCOI(opencv_core.Mat coiimg,
opencv_core.CvArr arr) |
static void |
opencv_core.insertImageCOI(opencv_core.Mat coiimg,
opencv_core.CvArr arr,
int coi)
inserts single-channel cv::Mat into a multi-channel CvMat or IplImage
|
static void |
opencv_imgproc.integral(opencv_core.Mat src,
opencv_core.Mat sum) |
static void |
opencv_imgproc.integral(opencv_core.Mat src,
opencv_core.Mat sum,
int sdepth)
computes the integral image
|
static void |
opencv_imgproc.integral2(opencv_core.Mat src,
opencv_core.Mat sum,
opencv_core.Mat sqsum) |
static void |
opencv_imgproc.integral2(opencv_core.Mat src,
opencv_core.Mat sum,
opencv_core.Mat sqsum,
int sdepth)
computes the integral image and integral for the squared image
|
static void |
opencv_imgproc.integral3(opencv_core.Mat src,
opencv_core.Mat sum,
opencv_core.Mat sqsum,
opencv_core.Mat tilted) |
static void |
opencv_imgproc.integral3(opencv_core.Mat src,
opencv_core.Mat sum,
opencv_core.Mat sqsum,
opencv_core.Mat tilted,
int sdepth)
computes the integral image, integral for the squared image and the tilted integral image
|
static float |
opencv_imgproc.intersectConvexConvex(opencv_core.Mat _p1,
opencv_core.Mat _p2,
opencv_core.Mat _p12) |
static float |
opencv_imgproc.intersectConvexConvex(opencv_core.Mat _p1,
opencv_core.Mat _p2,
opencv_core.Mat _p12,
boolean handleNested)
finds intersection of two convex polygons
|
static double |
opencv_core.invert(opencv_core.Mat src,
opencv_core.Mat dst) |
static double |
opencv_core.invert(opencv_core.Mat src,
opencv_core.Mat dst,
int flags)
computes inverse or pseudo-inverse matrix
|
static void |
opencv_imgproc.invertAffineTransform(opencv_core.Mat M,
opencv_core.Mat iM)
computes 2x3 affine transformation matrix that is inverse to the specified 2x3 affine transformation.
|
static boolean |
opencv_imgproc.isContourConvex(opencv_core.Mat contour)
returns true if the contour is convex.
|
static double |
opencv_core.kmeans(opencv_core.Mat data,
int K,
opencv_core.Mat bestLabels,
opencv_core.TermCriteria criteria,
int attempts,
int flags) |
static double |
opencv_core.kmeans(opencv_core.Mat data,
int K,
opencv_core.Mat bestLabels,
opencv_core.TermCriteria criteria,
int attempts,
int flags,
opencv_core.Mat centers)
clusters the input data using k-Means algorithm
|
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.Mat queryDescriptors,
opencv_core.Mat trainDescriptors,
opencv_features2d.DMatchVectorVector matches,
int k) |
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.Mat queryDescriptors,
opencv_core.Mat trainDescriptors,
opencv_features2d.DMatchVectorVector matches,
int k,
opencv_core.Mat mask,
boolean compactResult) |
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.Mat queryDescriptors,
opencv_features2d.DMatchVectorVector matches,
int k) |
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.Mat queryDescriptors,
opencv_features2d.DMatchVectorVector matches,
int k,
opencv_core.MatVector masks,
boolean compactResult) |
void |
opencv_features2d.GenericDescriptorMatcher.knnMatch(opencv_core.Mat queryImage,
opencv_features2d.KeyPoint queryKeypoints,
opencv_core.Mat trainImage,
opencv_features2d.KeyPoint trainKeypoints,
opencv_features2d.DMatchVectorVector matches,
int k) |
void |
opencv_features2d.GenericDescriptorMatcher.knnMatch(opencv_core.Mat queryImage,
opencv_features2d.KeyPoint queryKeypoints,
opencv_core.Mat trainImage,
opencv_features2d.KeyPoint trainKeypoints,
opencv_features2d.DMatchVectorVector matches,
int k,
opencv_core.Mat mask,
boolean compactResult) |
void |
opencv_features2d.GenericDescriptorMatcher.knnMatch(opencv_core.Mat queryImage,
opencv_features2d.KeyPoint queryKeypoints,
opencv_features2d.DMatchVectorVector matches,
int k) |
void |
opencv_features2d.GenericDescriptorMatcher.knnMatch(opencv_core.Mat queryImage,
opencv_features2d.KeyPoint queryKeypoints,
opencv_features2d.DMatchVectorVector matches,
int k,
opencv_core.MatVector masks,
boolean compactResult) |
void |
opencv_flann.Index.knnSearch(opencv_core.Mat query,
opencv_core.Mat indices,
opencv_core.Mat dists,
int knn) |
void |
opencv_flann.Index.knnSearch(opencv_core.Mat query,
opencv_core.Mat indices,
opencv_core.Mat dists,
int knn,
opencv_flann.SearchParams params) |
static void |
opencv_imgproc.Laplacian(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth) |
static void |
opencv_imgproc.Laplacian(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
int ksize,
double scale,
double delta,
int borderType)
applies Laplacian operator to the image
|
static void |
opencv_core.line(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color) |
static void |
opencv_core.line(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
draws the line segment (pt1, pt2) in the image
|
boolean |
opencv_flann.Index.load(opencv_core.Mat features,
BytePointer filename) |
boolean |
opencv_flann.Index.load(opencv_core.Mat features,
String filename) |
static void |
opencv_core.log(opencv_core.Mat src,
opencv_core.Mat dst)
computes natural logarithm of absolute value of each matrix element: dst = log(abs(src))
|
static void |
opencv_core.LUT(opencv_core.Mat src,
opencv_core.Mat lut,
opencv_core.Mat dst) |
static void |
opencv_core.LUT(opencv_core.Mat src,
opencv_core.Mat lut,
opencv_core.Mat dst,
int interpolation)
transforms array of numbers using a lookup table: dst(i)=lut(src(i))
|
static void |
opencv_core.magnitude(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat magnitude)
computes magnitude (magnitude(i)) of each (x(i), y(i)) vector
|
static double |
opencv_core.Mahalanobis(opencv_core.Mat v1,
opencv_core.Mat v2,
opencv_core.Mat icovar)
computes Mahalanobis distance between two vectors: sqrt((v1-v2)'*icovar*(v1-v2)), where icovar is the inverse covariance matrix
|
static double |
opencv_core.Mahalonobis(opencv_core.Mat v1,
opencv_core.Mat v2,
opencv_core.Mat icovar)
a synonym for Mahalanobis
|
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.Mat queryDescriptors,
opencv_core.Mat trainDescriptors,
opencv_features2d.DMatch matches) |
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.Mat queryDescriptors,
opencv_core.Mat trainDescriptors,
opencv_features2d.DMatch matches,
opencv_core.Mat mask) |
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.Mat queryDescriptors,
opencv_features2d.DMatch matches) |
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.Mat queryDescriptors,
opencv_features2d.DMatch matches,
opencv_core.MatVector masks) |
void |
opencv_features2d.GenericDescriptorMatcher.match(opencv_core.Mat queryImage,
opencv_features2d.KeyPoint queryKeypoints,
opencv_core.Mat trainImage,
opencv_features2d.KeyPoint trainKeypoints,
opencv_features2d.DMatch matches) |
void |
opencv_features2d.GenericDescriptorMatcher.match(opencv_core.Mat queryImage,
opencv_features2d.KeyPoint queryKeypoints,
opencv_core.Mat trainImage,
opencv_features2d.KeyPoint trainKeypoints,
opencv_features2d.DMatch matches,
opencv_core.Mat mask) |
void |
opencv_features2d.GenericDescriptorMatcher.match(opencv_core.Mat queryImage,
opencv_features2d.KeyPoint queryKeypoints,
opencv_features2d.DMatch matches) |
void |
opencv_features2d.GenericDescriptorMatcher.match(opencv_core.Mat queryImage,
opencv_features2d.KeyPoint queryKeypoints,
opencv_features2d.DMatch matches,
opencv_core.MatVector masks) |
static double |
opencv_imgproc.matchShapes(opencv_core.Mat contour1,
opencv_core.Mat contour2,
int method,
double parameter)
matches two contours using one of the available algorithms
|
static void |
opencv_imgproc.matchTemplate(opencv_core.Mat image,
opencv_core.Mat templ,
opencv_core.Mat result,
int method)
computes the proximity map for the raster template and the image where the template is searched for
|
static void |
opencv_calib3d.matMulDeriv(opencv_core.Mat A,
opencv_core.Mat B,
opencv_core.Mat dABdA,
opencv_core.Mat dABdB)
computes derivatives of the matrix product w.r.t each of the multiplied matrix coefficients
|
static void |
opencv_core.max(opencv_core.Mat src1,
double src2,
opencv_core.Mat dst)
computes per-element maximum of array and scalar (dst = max(src1, src2))
|
static void |
opencv_core.max(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst)
computes per-element maximum of two arrays (dst = max(src1, src2))
|
static opencv_core.Scalar |
opencv_core.mean(opencv_core.Mat src) |
opencv_core.PCA |
opencv_core.PCA.mean(opencv_core.Mat mean) |
static opencv_core.Scalar |
opencv_core.mean(opencv_core.Mat src,
opencv_core.Mat mask)
computes mean value of selected array elements
|
static int |
opencv_video.meanShift(opencv_core.Mat probImage,
opencv_core.Rect window,
opencv_core.TermCriteria criteria)
updates the object tracking window using meanshift algorithm
|
static void |
opencv_core.meanStdDev(opencv_core.Mat src,
opencv_core.Mat mean,
opencv_core.Mat stddev) |
static void |
opencv_core.meanStdDev(opencv_core.Mat src,
opencv_core.Mat mean,
opencv_core.Mat stddev,
opencv_core.Mat mask)
computes mean value and standard deviation of all or selected array elements
|
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.measurementMatrix(opencv_core.Mat measurementMatrix) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.measurementNoiseCov(opencv_core.Mat measurementNoiseCov) |
static void |
opencv_imgproc.medianBlur(opencv_core.Mat src,
opencv_core.Mat dst,
int ksize)
smooths the image using median filter.
|
static void |
opencv_core.merge(opencv_core.Mat mv,
long count,
opencv_core.Mat dst)
makes multi-channel array out of several single-channel arrays
|
static void |
opencv_core.merge(opencv_core.MatVector mv,
opencv_core.Mat dst) |
static void |
opencv_core.min(opencv_core.Mat src1,
double src2,
opencv_core.Mat dst)
computes per-element minimum of array and scalar (dst = min(src1, src2))
|
static void |
opencv_core.min(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst)
computes per-element minimum of two arrays (dst = min(src1, src2))
|
static opencv_core.RotatedRect |
opencv_imgproc.minAreaRect(opencv_core.Mat points)
computes the minimal rotated rectangle for a set of points
|
static void |
opencv_imgproc.minEnclosingCircle(opencv_core.Mat points,
opencv_core.Point2f center,
float[] radius) |
static void |
opencv_imgproc.minEnclosingCircle(opencv_core.Mat points,
opencv_core.Point2f center,
FloatBuffer radius) |
static void |
opencv_imgproc.minEnclosingCircle(opencv_core.Mat points,
opencv_core.Point2f center,
FloatPointer radius)
computes the minimal enclosing circle for a set of points
|
static void |
opencv_core.minMaxIdx(opencv_core.Mat src,
double[] minVal,
double[] maxVal) |
static void |
opencv_core.minMaxIdx(opencv_core.Mat src,
double[] minVal,
double[] maxVal,
int[] minIdx,
int[] maxIdx,
opencv_core.Mat mask) |
static void |
opencv_core.minMaxIdx(opencv_core.Mat src,
DoubleBuffer minVal,
DoubleBuffer maxVal) |
static void |
opencv_core.minMaxIdx(opencv_core.Mat src,
DoubleBuffer minVal,
DoubleBuffer maxVal,
IntBuffer minIdx,
IntBuffer maxIdx,
opencv_core.Mat mask) |
static void |
opencv_core.minMaxIdx(opencv_core.Mat src,
DoublePointer minVal,
DoublePointer maxVal) |
static void |
opencv_core.minMaxIdx(opencv_core.Mat src,
DoublePointer minVal,
DoublePointer maxVal,
IntPointer minIdx,
IntPointer maxIdx,
opencv_core.Mat mask) |
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
double[] minVal) |
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
double[] minVal,
double[] maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc,
opencv_core.Mat mask) |
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
DoubleBuffer minVal) |
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
DoubleBuffer minVal,
DoubleBuffer maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc,
opencv_core.Mat mask) |
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
DoublePointer minVal) |
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
DoublePointer minVal,
DoublePointer maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc,
opencv_core.Mat mask)
finds global minimum and maximum array elements and returns their values and their locations
|
static void |
opencv_core.mixChannels(opencv_core.Mat src,
long nsrcs,
opencv_core.Mat dst,
long ndsts,
int[] fromTo,
long npairs) |
static void |
opencv_core.mixChannels(opencv_core.Mat src,
long nsrcs,
opencv_core.Mat dst,
long ndsts,
IntBuffer fromTo,
long npairs) |
static void |
opencv_core.mixChannels(opencv_core.Mat src,
long nsrcs,
opencv_core.Mat dst,
long ndsts,
IntPointer fromTo,
long npairs)
copies selected channels from the input arrays to the selected channels of the output arrays
|
static opencv_imgproc.Moments |
opencv_imgproc.moments(opencv_core.Mat array) |
static opencv_imgproc.Moments |
opencv_imgproc.moments(opencv_core.Mat array,
boolean binaryImage)
computes moments of the rasterized shape or a vector of points
|
static void |
opencv_imgproc.morphologyEx(opencv_core.Mat src,
opencv_core.Mat dst,
int op,
opencv_core.Mat kernel) |
static void |
opencv_imgproc.morphologyEx(opencv_core.Mat src,
opencv_core.Mat dst,
int op,
opencv_core.Mat kernel,
opencv_core.Point anchor,
int iterations,
int borderType,
opencv_core.Scalar borderValue)
applies an advanced morphological operation to the image
|
opencv_core.MatExpr |
opencv_core.Mat.mul(opencv_core.Mat m) |
opencv_core.MatExpr |
opencv_core.Mat.mul(opencv_core.Mat m,
double scale)
per-element matrix multiplication by means of matrix expressions
|
static void |
opencv_core.mulSpectrums(opencv_core.Mat a,
opencv_core.Mat b,
opencv_core.Mat c,
int flags) |
static void |
opencv_core.mulSpectrums(opencv_core.Mat a,
opencv_core.Mat b,
opencv_core.Mat c,
int flags,
boolean conjB)
computes element-wise product of the two Fourier spectrums.
|
static void |
opencv_core.multiply(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.multiply(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
double scale,
int dtype)
computes element-wise weighted product of the two arrays (dst = scale*src1*src2)
|
static void |
opencv_core.mulTransposed(opencv_core.Mat src,
opencv_core.Mat dst,
boolean aTa) |
static void |
opencv_core.mulTransposed(opencv_core.Mat src,
opencv_core.Mat dst,
boolean aTa,
opencv_core.Mat delta,
double scale,
int dtype)
multiplies matrix by its transposition from the left or from the right
|
void |
opencv_contrib.CvHybridTracker.newTracker(opencv_core.Mat image,
opencv_core.Rect selection) |
void |
opencv_contrib.CvMeanShiftTracker.newTrackingWindow(opencv_core.Mat image,
opencv_core.Rect selection) |
void |
opencv_contrib.CvFeatureTracker.newTrackingWindow(opencv_core.Mat image,
opencv_core.Rect selection) |
void |
opencv_superres.FrameSource.nextFrame(opencv_core.Mat frame) |
void |
opencv_superres.SuperResolution.nextFrame(opencv_core.Mat frame) |
static double |
opencv_core.norm(opencv_core.Mat src1) |
static double |
opencv_core.norm(opencv_core.Mat src1,
int normType,
opencv_core.Mat mask)
computes norm of the selected array part
|
static double |
opencv_core.norm(opencv_core.Mat src1,
opencv_core.Mat src2) |
static double |
opencv_core.norm(opencv_core.Mat src1,
opencv_core.Mat src2,
int normType,
opencv_core.Mat mask)
computes norm of selected part of the difference between two arrays
|
static void |
opencv_core.normalize(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.normalize(opencv_core.Mat src,
opencv_core.Mat dst,
double alpha,
double beta,
int norm_type,
int dtype,
opencv_core.Mat mask)
scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values
|
static void |
opencv_stitching.normalizeUsingWeightMap(opencv_core.Mat weight,
opencv_core.Mat src) |
static void |
opencv_core.patchNaNs(opencv_core.Mat a) |
static void |
opencv_core.patchNaNs(opencv_core.Mat a,
double val)
converts NaN's to the given number
|
static void |
opencv_core.PCABackProject(opencv_core.Mat data,
opencv_core.Mat mean,
opencv_core.Mat eigenvectors,
opencv_core.Mat result) |
static void |
opencv_core.PCACompute(opencv_core.Mat data,
opencv_core.Mat mean,
opencv_core.Mat eigenvectors) |
static void |
opencv_core.PCACompute(opencv_core.Mat data,
opencv_core.Mat mean,
opencv_core.Mat eigenvectors,
int maxComponents) |
static void |
opencv_core.PCAComputeVar(opencv_core.Mat data,
opencv_core.Mat mean,
opencv_core.Mat eigenvectors,
double retainedVariance) |
static void |
opencv_core.PCAProject(opencv_core.Mat data,
opencv_core.Mat mean,
opencv_core.Mat eigenvectors,
opencv_core.Mat result) |
static void |
opencv_core.perspectiveTransform(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat m)
performs perspective transformation of each element of multi-channel input matrix
|
static void |
opencv_core.phase(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat angle) |
static void |
opencv_core.phase(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat angle,
boolean angleInDegrees)
computes angle (angle(i)) of each (x(i), y(i)) vector
|
static opencv_core.Point2d |
opencv_imgproc.phaseCorrelate(opencv_core.Mat src1,
opencv_core.Mat src2) |
static opencv_core.Point2d |
opencv_imgproc.phaseCorrelate(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat window) |
static opencv_core.Point2d |
opencv_imgproc.phaseCorrelateRes(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat window) |
static opencv_core.Point2d |
opencv_imgproc.phaseCorrelateRes(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat window,
double[] response) |
static opencv_core.Point2d |
opencv_imgproc.phaseCorrelateRes(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat window,
DoubleBuffer response) |
static opencv_core.Point2d |
opencv_imgproc.phaseCorrelateRes(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat window,
DoublePointer response) |
opencv_core.NAryMatIterator |
opencv_core.NAryMatIterator.planes(opencv_core.Mat planes) |
static void |
opencv_highgui.pointCloudShow(BytePointer winname,
opencv_core.GlCamera camera,
opencv_core.Mat points) |
static void |
opencv_highgui.pointCloudShow(BytePointer winname,
opencv_core.GlCamera camera,
opencv_core.Mat points,
opencv_core.Mat colors) |
static void |
opencv_highgui.pointCloudShow(String winname,
opencv_core.GlCamera camera,
opencv_core.Mat points) |
static void |
opencv_highgui.pointCloudShow(String winname,
opencv_core.GlCamera camera,
opencv_core.Mat points,
opencv_core.Mat colors) |
static double |
opencv_imgproc.pointPolygonTest(opencv_core.Mat contour,
opencv_core.Point2f pt,
boolean measureDist)
checks if the point is inside the contour.
|
opencv_core.KDTree |
opencv_core.KDTree.points(opencv_core.Mat points) |
static void |
opencv_core.polarToCart(opencv_core.Mat magnitude,
opencv_core.Mat angle,
opencv_core.Mat x,
opencv_core.Mat y) |
static void |
opencv_core.polarToCart(opencv_core.Mat magnitude,
opencv_core.Mat angle,
opencv_core.Mat x,
opencv_core.Mat y,
boolean angleInDegrees)
converts polar coordinates to Cartesian
|
static void |
opencv_contrib.polyfit(opencv_core.Mat srcx,
opencv_core.Mat srcy,
opencv_core.Mat dst,
int order) |
static void |
opencv_core.polylines(opencv_core.Mat img,
opencv_core.MatVector pts,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_core.polylines(opencv_core.Mat img,
opencv_core.MatVector pts,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_core.polylines(opencv_core.Mat img,
opencv_core.Point pts,
int[] npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_core.polylines(opencv_core.Mat img,
opencv_core.Point pts,
int[] npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_core.polylines(opencv_core.Mat img,
opencv_core.Point pts,
IntBuffer npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_core.polylines(opencv_core.Mat img,
opencv_core.Point pts,
IntBuffer npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_core.polylines(opencv_core.Mat img,
opencv_core.Point pts,
IntPointer npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_core.polylines(opencv_core.Mat img,
opencv_core.Point pts,
IntPointer npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_core.polylines(opencv_core.Mat img,
PointerPointer pts,
IntPointer npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
draws one or more polygonal curves
|
static void |
opencv_core.pow(opencv_core.Mat src,
double power,
opencv_core.Mat dst)
raises the input matrix elements to the specified power (b = a**power)
|
static void |
opencv_imgproc.preCornerDetect(opencv_core.Mat src,
opencv_core.Mat dst,
int ksize) |
static void |
opencv_imgproc.preCornerDetect(opencv_core.Mat src,
opencv_core.Mat dst,
int ksize,
int borderType)
computes another complex cornerness criteria at each pixel
|
void |
opencv_ml.CvSVM.predict_all(opencv_core.Mat samples,
opencv_core.Mat results) |
float |
opencv_ml.CvRTrees.predict_prob(opencv_core.Mat sample) |
float |
opencv_ml.CvRTrees.predict_prob(opencv_core.Mat sample,
opencv_core.Mat missing) |
int |
opencv_contrib.FaceRecognizer.predict(opencv_core.Mat src) |
opencv_core.Mat |
opencv_video.KalmanFilter.predict(opencv_core.Mat control)
computes predicted state
|
float |
opencv_legacy.CvEM.predict(opencv_core.Mat sample) |
float |
opencv_ml.CvNormalBayesClassifier.predict(opencv_core.Mat samples) |
float |
opencv_ml.CvSVM.predict(opencv_core.Mat sample) |
opencv_core.Point2d |
opencv_ml.EM.predict(opencv_core.Mat sample) |
opencv_ml.CvDTreeNode |
opencv_ml.CvDTree.predict(opencv_core.Mat sample) |
float |
opencv_ml.CvRTrees.predict(opencv_core.Mat sample) |
float |
opencv_ml.CvBoost.predict(opencv_core.Mat sample) |
float |
opencv_ml.CvGBTrees.predict(opencv_core.Mat sample) |
float |
opencv_ml.CvSVM.predict(opencv_core.Mat sample,
boolean returnDFVal) |
void |
opencv_contrib.FaceRecognizer.predict(opencv_core.Mat src,
int[] label,
double[] confidence) |
void |
opencv_contrib.FaceRecognizer.predict(opencv_core.Mat src,
IntBuffer label,
DoubleBuffer confidence) |
void |
opencv_contrib.FaceRecognizer.predict(opencv_core.Mat src,
IntPointer label,
DoublePointer confidence) |
float |
opencv_legacy.CvEM.predict(opencv_core.Mat sample,
opencv_core.Mat probs) |
float |
opencv_ml.CvNormalBayesClassifier.predict(opencv_core.Mat samples,
opencv_core.Mat results) |
opencv_core.Point2d |
opencv_ml.EM.predict(opencv_core.Mat sample,
opencv_core.Mat probs) |
float |
opencv_ml.CvRTrees.predict(opencv_core.Mat sample,
opencv_core.Mat missing) |
float |
opencv_ml.CvANN_MLP.predict(opencv_core.Mat inputs,
opencv_core.Mat outputs) |
opencv_ml.CvDTreeNode |
opencv_ml.CvDTree.predict(opencv_core.Mat sample,
opencv_core.Mat missingDataMask,
boolean preprocessedInput) |
float |
opencv_ml.CvBoost.predict(opencv_core.Mat sample,
opencv_core.Mat missing,
opencv_core.Range slice,
boolean rawMode,
boolean returnSum) |
float |
opencv_ml.CvGBTrees.predict(opencv_core.Mat sample,
opencv_core.Mat missing,
opencv_core.Range slice,
int k) |
void |
opencv_contrib.DetectionBasedTracker.process(opencv_core.Mat imageGray) |
opencv_objdetect.QuantizedPyramid |
opencv_objdetect.Modality.process(opencv_core.Mat src) |
opencv_objdetect.QuantizedPyramid |
opencv_objdetect.Modality.process(opencv_core.Mat src,
opencv_core.Mat mask)
\brief Form a quantized image pyramid from a source image.
|
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.processNoiseCov(opencv_core.Mat processNoiseCov) |
opencv_core.Mat |
opencv_contrib.LDA.project(opencv_core.Mat src) |
opencv_core.Mat |
opencv_core.PCA.project(opencv_core.Mat vec)
projects vector from the original space to the principal components subspace
|
void |
opencv_core.PCA.project(opencv_core.Mat vec,
opencv_core.Mat result)
projects vector from the original space to the principal components subspace
|
static void |
opencv_calib3d.projectPoints(opencv_core.Mat objectPoints,
opencv_core.Mat rvec,
opencv_core.Mat tvec,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat imagePoints) |
static void |
opencv_calib3d.projectPoints(opencv_core.Mat objectPoints,
opencv_core.Mat rvec,
opencv_core.Mat tvec,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat imagePoints,
opencv_core.Mat jacobian,
double aspectRatio)
projects points from the model coordinate space to the image coordinates.
|
static double |
opencv_imgproc.PSNR(opencv_core.Mat src1,
opencv_core.Mat src2)
computes PSNR image/video quality metric
|
void |
opencv_core.Mat.push_back(opencv_core.Mat m)
adds element to the end of 1d matrix (or possibly multiple elements when _Tp=Mat)
|
opencv_core.MatVector |
opencv_core.MatVector.put(long i,
opencv_core.Mat value) |
opencv_core.MatVector |
opencv_core.MatVector.put(opencv_core.Mat... array) |
opencv_core.Mat |
opencv_core.Mat.put(opencv_core.Mat m)
assignment operators
|
opencv_core.SparseMat |
opencv_core.SparseMat.put(opencv_core.Mat m)
equivalent to the corresponding constructor
|
static void |
opencv_core.putText(opencv_core.Mat img,
BytePointer text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color) |
static void |
opencv_core.putText(opencv_core.Mat img,
BytePointer text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color,
int thickness,
int lineType,
boolean bottomLeftOrigin)
renders text string in the image
|
static void |
opencv_core.putText(opencv_core.Mat img,
String text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color) |
static void |
opencv_core.putText(opencv_core.Mat img,
String text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color,
int thickness,
int lineType,
boolean bottomLeftOrigin) |
static void |
opencv_imgproc.pyrDown(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_imgproc.pyrDown(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dstsize,
int borderType)
smooths and downsamples the image
|
static void |
opencv_imgproc.pyrMeanShiftFiltering(opencv_core.Mat src,
opencv_core.Mat dst,
double sp,
double sr) |
static void |
opencv_imgproc.pyrMeanShiftFiltering(opencv_core.Mat src,
opencv_core.Mat dst,
double sp,
double sr,
int maxLevel,
opencv_core.TermCriteria termcrit)
filters image using meanshift algorithm
|
static void |
opencv_imgproc.pyrUp(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_imgproc.pyrUp(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dstsize,
int borderType)
upsamples and smoothes the image
|
void |
opencv_objdetect.QuantizedPyramid.quantize(opencv_core.Mat dst)
\brief Compute quantized image at current pyramid level for online detection.
|
opencv_stitching.CameraParams |
opencv_stitching.CameraParams.R(opencv_core.Mat R) |
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.Mat queryDescriptors,
opencv_core.Mat trainDescriptors,
opencv_features2d.DMatchVectorVector matches,
float maxDistance) |
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.Mat queryDescriptors,
opencv_core.Mat trainDescriptors,
opencv_features2d.DMatchVectorVector matches,
float maxDistance,
opencv_core.Mat mask,
boolean compactResult) |
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.Mat queryDescriptors,
opencv_features2d.DMatchVectorVector matches,
float maxDistance) |
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.Mat queryDescriptors,
opencv_features2d.DMatchVectorVector matches,
float maxDistance,
opencv_core.MatVector masks,
boolean compactResult) |
void |
opencv_features2d.GenericDescriptorMatcher.radiusMatch(opencv_core.Mat queryImage,
opencv_features2d.KeyPoint queryKeypoints,
opencv_core.Mat trainImage,
opencv_features2d.KeyPoint trainKeypoints,
opencv_features2d.DMatchVectorVector matches,
float maxDistance) |
void |
opencv_features2d.GenericDescriptorMatcher.radiusMatch(opencv_core.Mat queryImage,
opencv_features2d.KeyPoint queryKeypoints,
opencv_core.Mat trainImage,
opencv_features2d.KeyPoint trainKeypoints,
opencv_features2d.DMatchVectorVector matches,
float maxDistance,
opencv_core.Mat mask,
boolean compactResult) |
void |
opencv_features2d.GenericDescriptorMatcher.radiusMatch(opencv_core.Mat queryImage,
opencv_features2d.KeyPoint queryKeypoints,
opencv_features2d.DMatchVectorVector matches,
float maxDistance) |
void |
opencv_features2d.GenericDescriptorMatcher.radiusMatch(opencv_core.Mat queryImage,
opencv_features2d.KeyPoint queryKeypoints,
opencv_features2d.DMatchVectorVector matches,
float maxDistance,
opencv_core.MatVector masks,
boolean compactResult) |
int |
opencv_flann.Index.radiusSearch(opencv_core.Mat query,
opencv_core.Mat indices,
opencv_core.Mat dists,
double radius,
int maxResults) |
int |
opencv_flann.Index.radiusSearch(opencv_core.Mat query,
opencv_core.Mat indices,
opencv_core.Mat dists,
double radius,
int maxResults,
opencv_flann.SearchParams params) |
static void |
opencv_core.randn(opencv_core.Mat dst,
opencv_core.Mat mean,
opencv_core.Mat stddev)
fills array with normally-distributed random numbers with the specified mean and the standard deviation
|
static void |
opencv_core.randShuffle(opencv_core.Mat dst) |
static void |
opencv_core.randShuffle(opencv_core.Mat dst,
double iterFactor) |
static void |
opencv_core.randShuffle(opencv_core.Mat dst,
double iterFactor,
opencv_core.RNG rng)
shuffles the input array elements
|
static void |
opencv_core.randu(opencv_core.Mat dst,
opencv_core.Mat low,
opencv_core.Mat high)
fills array with uniformly-distributed random numbers from the range [low, high)
|
boolean |
opencv_highgui.VideoCapture.read(opencv_core.Mat image) |
opencv_core.Mat |
opencv_contrib.LDA.reconstruct(opencv_core.Mat src) |
static void |
opencv_core.rectangle(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color) |
static void |
opencv_core.rectangle(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image
|
static void |
opencv_core.rectangle(opencv_core.Mat img,
opencv_core.Rect rec,
opencv_core.Scalar color) |
static void |
opencv_core.rectangle(opencv_core.Mat img,
opencv_core.Rect rec,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
draws the rectangle outline or a solid rectangle covering rec in the image
|
static float |
opencv_calib3d.rectify3Collinear(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Mat cameraMatrix3,
opencv_core.Mat distCoeffs3,
opencv_core.MatVector imgpt1,
opencv_core.MatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.Mat R12,
opencv_core.Mat T12,
opencv_core.Mat R13,
opencv_core.Mat T13,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat R3,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat P3,
opencv_core.Mat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags)
computes the rectification transformations for 3-head camera, where all the heads are on the same line.
|
static void |
opencv_core.reduce(opencv_core.Mat src,
opencv_core.Mat dst,
int dim,
int rtype) |
static void |
opencv_core.reduce(opencv_core.Mat src,
opencv_core.Mat dst,
int dim,
int rtype,
int dtype)
transforms 2D matrix to 1D row or column vector by taking sum, minimum, maximum or mean value over all the rows
|
static void |
opencv_imgproc.remap(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat map1,
opencv_core.Mat map2,
int interpolation) |
static void |
opencv_imgproc.remap(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat map1,
opencv_core.Mat map2,
int interpolation,
int borderMode,
opencv_core.Scalar borderValue)
warps the image using the precomputed maps.
|
static opencv_core.Mat |
opencv_core.repeat(opencv_core.Mat src,
int ny,
int nx) |
static void |
opencv_core.repeat(opencv_core.Mat src,
int ny,
int nx,
opencv_core.Mat dst)
replicates the input matrix the specified number of times in the horizontal and/or vertical direction
|
static void |
opencv_calib3d.reprojectImageTo3D(opencv_core.Mat disparity,
opencv_core.Mat _3dImage,
opencv_core.Mat Q) |
static void |
opencv_calib3d.reprojectImageTo3D(opencv_core.Mat disparity,
opencv_core.Mat _3dImage,
opencv_core.Mat Q,
boolean handleMissingValues,
int ddepth)
reprojects disparity image to 3D: (x,y,d)->(X,Y,Z) using the matrix Q returned by cv::stereoRectify
|
static void |
opencv_imgproc.resize(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dsize) |
static void |
opencv_imgproc.resize(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dsize,
double fx,
double fy,
int interpolation)
resizes the image
|
boolean |
opencv_highgui.VideoCapture.retrieve(opencv_core.Mat image) |
boolean |
opencv_highgui.VideoCapture.retrieve(opencv_core.Mat image,
int channel) |
static boolean |
opencv_contrib.RGBDOdometry(opencv_core.Mat Rt,
opencv_core.Mat initRt,
opencv_core.Mat image0,
opencv_core.Mat depth0,
opencv_core.Mat mask0,
opencv_core.Mat image1,
opencv_core.Mat depth1,
opencv_core.Mat mask1,
opencv_core.Mat cameraMatrix) |
static boolean |
opencv_contrib.RGBDOdometry(opencv_core.Mat Rt,
opencv_core.Mat initRt,
opencv_core.Mat image0,
opencv_core.Mat depth0,
opencv_core.Mat mask0,
opencv_core.Mat image1,
opencv_core.Mat depth1,
opencv_core.Mat mask1,
opencv_core.Mat cameraMatrix,
float minDepth,
float maxDepth,
float maxDepthDiff,
int[] iterCounts,
float[] minGradientMagnitudes,
int transformType) |
static boolean |
opencv_contrib.RGBDOdometry(opencv_core.Mat Rt,
opencv_core.Mat initRt,
opencv_core.Mat image0,
opencv_core.Mat depth0,
opencv_core.Mat mask0,
opencv_core.Mat image1,
opencv_core.Mat depth1,
opencv_core.Mat mask1,
opencv_core.Mat cameraMatrix,
float minDepth,
float maxDepth,
float maxDepthDiff,
IntBuffer iterCounts,
FloatBuffer minGradientMagnitudes,
int transformType) |
static boolean |
opencv_contrib.RGBDOdometry(opencv_core.Mat Rt,
opencv_core.Mat initRt,
opencv_core.Mat image0,
opencv_core.Mat depth0,
opencv_core.Mat mask0,
opencv_core.Mat image1,
opencv_core.Mat depth1,
opencv_core.Mat mask1,
opencv_core.Mat cameraMatrix,
float minDepth,
float maxDepth,
float maxDepthDiff,
IntPointer iterCounts,
FloatPointer minGradientMagnitudes,
int transformType) |
static void |
opencv_calib3d.Rodrigues(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_calib3d.Rodrigues(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat jacobian)
converts rotation vector to rotation matrix or vice versa using Rodrigues transformation
|
static opencv_core.Point3d |
opencv_calib3d.RQDecomp3x3(opencv_core.Mat src,
opencv_core.Mat mtxR,
opencv_core.Mat mtxQ) |
static opencv_core.Point3d |
opencv_calib3d.RQDecomp3x3(opencv_core.Mat src,
opencv_core.Mat mtxR,
opencv_core.Mat mtxQ,
opencv_core.Mat Qx,
opencv_core.Mat Qy,
opencv_core.Mat Qz)
Computes RQ decomposition of 3x3 matrix
|
void |
opencv_contrib.LevMarqSparse.run(int npoints,
int ncameras,
int nPointParams,
int nCameraParams,
int nErrParams,
opencv_core.Mat visibility,
opencv_core.Mat P0,
opencv_core.Mat X,
opencv_core.TermCriteria criteria,
opencv_contrib.LevMarqSparse.Fjac_int_int_Mat_Mat_Mat_Mat_Pointer fjac,
opencv_contrib.LevMarqSparse.Func_int_int_Mat_Mat_Mat_Pointer func,
Pointer data) |
void |
opencv_contrib.Retina.run(opencv_core.Mat inputImage)
method which allows retina to be applied on an input image, after run, encapsulated retina module is ready to deliver its outputs using dedicated accessors, see getParvo and getMagno methods
|
void |
opencv_videostab.IDenseOptFlowEstimator.run(opencv_core.Mat frame0,
opencv_core.Mat frame1,
opencv_core.Mat flowX,
opencv_core.Mat flowY,
opencv_core.Mat errors) |
void |
opencv_videostab.DensePyrLkOptFlowEstimatorGpu.run(opencv_core.Mat frame0,
opencv_core.Mat frame1,
opencv_core.Mat flowX,
opencv_core.Mat flowY,
opencv_core.Mat errors) |
void |
opencv_videostab.ISparseOptFlowEstimator.run(opencv_core.Mat frame0,
opencv_core.Mat frame1,
opencv_core.Mat points0,
opencv_core.Mat points1,
opencv_core.Mat status,
opencv_core.Mat errors) |
void |
opencv_videostab.SparsePyrLkOptFlowEstimator.run(opencv_core.Mat frame0,
opencv_core.Mat frame1,
opencv_core.Mat points0,
opencv_core.Mat points1,
opencv_core.Mat status,
opencv_core.Mat errors) |
static void |
opencv_features2d.KeyPointsFilter.runByPixelsMask(opencv_features2d.KeyPoint keypoints,
opencv_core.Mat mask) |
static void |
opencv_core.scaleAdd(opencv_core.Mat src1,
double alpha,
opencv_core.Mat src2,
opencv_core.Mat dst)
adds scaled array to another one (dst = alpha*src1 + src2)
|
static void |
opencv_imgproc.Scharr(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
int dx,
int dy) |
static void |
opencv_imgproc.Scharr(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
int dx,
int dy,
double scale,
double delta,
int borderType)
applies the vertical or horizontal Scharr operator to the image
|
static void |
opencv_video.segmentMotion(opencv_core.Mat mhi,
opencv_core.Mat segmask,
opencv_core.Rect boundingRects,
double timestamp,
double segThresh) |
static void |
opencv_imgproc.sepFilter2D(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Mat kernelX,
opencv_core.Mat kernelY) |
static void |
opencv_imgproc.sepFilter2D(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Mat kernelX,
opencv_core.Mat kernelY,
opencv_core.Point anchor,
double delta,
int borderType)
applies separable 2D linear filter to the image
|
void |
opencv_core.Algorithm.set(BytePointer name,
opencv_core.Mat value) |
void |
opencv_core.Algorithm.set(String name,
opencv_core.Mat value) |
void |
opencv_stitching.ProjectorBase.setCameraParams(opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T) |
static void |
opencv_core.setIdentity(opencv_core.Mat mtx) |
static void |
opencv_core.setIdentity(opencv_core.Mat mtx,
opencv_core.Scalar s)
initializes scaled identity matrix
|
boolean |
opencv_objdetect.CascadeClassifier.setImage(opencv_core.Mat arg0) |
boolean |
opencv_objdetect.FeatureEvaluator.setImage(opencv_core.Mat img,
opencv_core.Size origWinSize) |
void |
opencv_core.Algorithm.setMat(BytePointer name,
opencv_core.Mat value) |
void |
opencv_core.Algorithm.setMat(String name,
opencv_core.Mat value) |
void |
opencv_stitching.Stitcher.setMatchingMask(opencv_core.Mat mask) |
void |
opencv_stitching.BundleAdjusterBase.setRefinementMask(opencv_core.Mat mask) |
void |
opencv_objdetect.HOGDescriptor.setSVMDetector(opencv_core.Mat _svmdetector) |
void |
opencv_imgproc.GeneralizedHough.setTemplate(opencv_core.Mat templ) |
void |
opencv_imgproc.GeneralizedHough.setTemplate(opencv_core.Mat templ,
int cannyThreshold,
opencv_core.Point templCenter)
set template to search
|
void |
opencv_imgproc.GeneralizedHough.setTemplate(opencv_core.Mat edges,
opencv_core.Mat dx,
opencv_core.Mat dy) |
void |
opencv_imgproc.GeneralizedHough.setTemplate(opencv_core.Mat edges,
opencv_core.Mat dx,
opencv_core.Mat dy,
opencv_core.Point templCenter) |
opencv_core.Mat |
opencv_core.Mat.setTo(opencv_core.Mat value) |
opencv_core.Mat |
opencv_core.Mat.setTo(opencv_core.Mat value,
opencv_core.Mat mask)
sets some of the matrix elements to s, according to the mask
|
void |
opencv_features2d.BOWImgDescriptorExtractor.setVocabulary(opencv_core.Mat vocabulary) |
opencv_highgui.VideoWriter |
opencv_highgui.VideoWriter.shiftLeft(opencv_core.Mat image) |
opencv_highgui.VideoCapture |
opencv_highgui.VideoCapture.shiftRight(opencv_core.Mat image) |
static void |
opencv_imgproc.Sobel(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
int dx,
int dy) |
static void |
opencv_imgproc.Sobel(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
int dx,
int dy,
int ksize,
double scale,
double delta,
int borderType)
applies generalized Sobel operator to the image
|
static boolean |
opencv_core.solve(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static boolean |
opencv_core.solve(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
int flags)
solves linear system or a least-square problem
|
static int |
opencv_core.solveCubic(opencv_core.Mat coeffs,
opencv_core.Mat roots)
finds real roots of a cubic polynomial
|
static boolean |
opencv_calib3d.solvePnP(opencv_core.Mat objectPoints,
opencv_core.Mat imagePoints,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec) |
static boolean |
opencv_calib3d.solvePnP(opencv_core.Mat objectPoints,
opencv_core.Mat imagePoints,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec,
boolean useExtrinsicGuess,
int flags) |
static void |
opencv_calib3d.solvePnPRansac(opencv_core.Mat objectPoints,
opencv_core.Mat imagePoints,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec) |
static void |
opencv_calib3d.solvePnPRansac(opencv_core.Mat objectPoints,
opencv_core.Mat imagePoints,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec,
boolean useExtrinsicGuess,
int iterationsCount,
float reprojectionError,
int minInliersCount,
opencv_core.Mat inliers,
int flags)
computes the camera pose from a few 3D points and the corresponding projections.
|
static double |
opencv_core.solvePoly(opencv_core.Mat coeffs,
opencv_core.Mat roots) |
static double |
opencv_core.solvePoly(opencv_core.Mat coeffs,
opencv_core.Mat roots,
int maxIters)
finds real and complex roots of a polynomial
|
static void |
opencv_core.SVD.solveZ(opencv_core.Mat src,
opencv_core.Mat dst)
finds dst = arg min_{|dst|=1} |m*dst|
|
static void |
opencv_core.sort(opencv_core.Mat src,
opencv_core.Mat dst,
int flags)
sorts independently each matrix row or each matrix column
|
static void |
opencv_core.sortIdx(opencv_core.Mat src,
opencv_core.Mat dst,
int flags)
sorts independently each matrix row or each matrix column
|
static boolean |
opencv_contrib.SpinImageModel.spinCorrelation(opencv_core.Mat spin1,
opencv_core.Mat spin2,
float lambda,
float[] result) |
static boolean |
opencv_contrib.SpinImageModel.spinCorrelation(opencv_core.Mat spin1,
opencv_core.Mat spin2,
float lambda,
FloatBuffer result) |
static boolean |
opencv_contrib.SpinImageModel.spinCorrelation(opencv_core.Mat spin1,
opencv_core.Mat spin2,
float lambda,
FloatPointer result) |
static void |
opencv_core.split(opencv_core.Mat src,
opencv_core.Mat mvbegin)
copies each plane of a multi-channel array to a dedicated array
|
static void |
opencv_core.split(opencv_core.Mat m,
opencv_core.MatVector mv) |
static void |
opencv_core.sqrt(opencv_core.Mat src,
opencv_core.Mat dst)
computes square root of each matrix element (dst = src**0.5)
|
void |
opencv_contrib.SelfSimDescriptor.SSD(opencv_core.Mat img,
opencv_core.Point pt,
opencv_core.Mat ssd) |
opencv_core.Mat |
opencv_videostab.MotionFilterBase.stabilize(int index,
opencv_core.Mat motions,
int size) |
opencv_core.Mat |
opencv_videostab.GaussianMotionFilter.stabilize(int index,
opencv_core.Mat motions,
int size) |
void |
opencv_videostab.IMotionStabilizer.stabilize(opencv_core.Mat motions,
int size,
opencv_core.Mat stabilizationMotions) |
void |
opencv_videostab.MotionFilterBase.stabilize(opencv_core.Mat motions,
int size,
opencv_core.Mat stabilizationMotions) |
int |
opencv_imgproc.FilterEngine.start(opencv_core.Mat src) |
int |
opencv_imgproc.FilterEngine.start(opencv_core.Mat src,
opencv_core.Rect srcRoi,
boolean isolated,
int maxBufRows)
starts filtering of the specified ROI of the specified image.
|
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.statePost(opencv_core.Mat statePost) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.statePre(opencv_core.Mat statePre) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F,
opencv_core.TermCriteria criteria,
int flags)
finds intrinsic and extrinsic parameters of a stereo camera
|
static void |
opencv_calib3d.stereoRectify(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat Q) |
static void |
opencv_calib3d.stereoRectify(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat Q,
int flags,
double alpha,
opencv_core.Size newImageSize,
opencv_core.Rect validPixROI1,
opencv_core.Rect validPixROI2)
computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters
|
static boolean |
opencv_calib3d.stereoRectifyUncalibrated(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat F,
opencv_core.Size imgSize,
opencv_core.Mat H1,
opencv_core.Mat H2) |
static boolean |
opencv_calib3d.stereoRectifyUncalibrated(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat F,
opencv_core.Size imgSize,
opencv_core.Mat H1,
opencv_core.Mat H2,
double threshold)
computes the rectification transformation for an uncalibrated stereo camera (zero distortion is assumed)
|
int |
opencv_stitching.Stitcher.stitch(opencv_core.Mat images,
opencv_core.Mat pano) |
int |
opencv_stitching.Stitcher.stitch(opencv_core.Mat images,
opencv_core.RectVectorVector rois,
opencv_core.Mat pano) |
static opencv_core.Mat |
opencv_contrib.subspaceProject(opencv_core.Mat W,
opencv_core.Mat mean,
opencv_core.Mat src) |
static opencv_core.Mat |
opencv_contrib.subspaceReconstruct(opencv_core.Mat W,
opencv_core.Mat mean,
opencv_core.Mat src) |
static void |
opencv_core.subtract(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.subtract(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask,
int dtype)
subtracts one matrix from another (dst = src1 - src2)
|
static opencv_core.Scalar |
opencv_core.sumElems(opencv_core.Mat src)
computes sum of array elements
|
static void |
opencv_core.SVBackSubst(opencv_core.Mat w,
opencv_core.Mat u,
opencv_core.Mat vt,
opencv_core.Mat rhs,
opencv_core.Mat dst)
performs back substitution for the previously computed SVD
|
static void |
opencv_core.SVDecomp(opencv_core.Mat src,
opencv_core.Mat w,
opencv_core.Mat u,
opencv_core.Mat vt) |
static void |
opencv_core.SVDecomp(opencv_core.Mat src,
opencv_core.Mat w,
opencv_core.Mat u,
opencv_core.Mat vt,
int flags)
computes SVD of src
|
static void |
opencv_core.swap(opencv_core.Mat a,
opencv_core.Mat b)
swaps two matrices
|
opencv_stitching.CameraParams |
opencv_stitching.CameraParams.t(opencv_core.Mat t) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.temp1(opencv_core.Mat temp1) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.temp2(opencv_core.Mat temp2) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.temp3(opencv_core.Mat temp3) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.temp4(opencv_core.Mat temp4) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.temp5(opencv_core.Mat temp5) |
static double |
opencv_imgproc.threshold(opencv_core.Mat src,
opencv_core.Mat dst,
double thresh,
double maxval,
int type)
applies fixed threshold to the image
|
opencv_core.Mat |
opencv_contrib.LogPolar_Interp.to_cartesian(opencv_core.Mat source)
Transformation from cortical image to retinal (inverse log-polar) image.
|
opencv_core.Mat |
opencv_contrib.LogPolar_Overlapping.to_cartesian(opencv_core.Mat source)
Transformation from cortical image to retinal (inverse log-polar) image.
|
opencv_core.Mat |
opencv_contrib.LogPolar_Adjacent.to_cartesian(opencv_core.Mat source)
Transformation from cortical image to retinal (inverse log-polar) image.
|
opencv_core.Mat |
opencv_contrib.LogPolar_Interp.to_cortical(opencv_core.Mat source)
Transformation from Cartesian image to cortical (log-polar) image.
|
opencv_core.Mat |
opencv_contrib.LogPolar_Overlapping.to_cortical(opencv_core.Mat source)
Transformation from Cartesian image to cortical (log-polar) image.
|
opencv_core.Mat |
opencv_contrib.LogPolar_Adjacent.to_cortical(opencv_core.Mat source)
Transformation from Cartesian image to cortical (log-polar) image.
|
static opencv_core.Scalar |
opencv_core.trace(opencv_core.Mat mtx)
computes trace of a matrix
|
boolean |
opencv_ml.CvSVM.train_auto(opencv_core.Mat trainData,
opencv_core.Mat responses,
opencv_core.Mat varIdx,
opencv_core.Mat sampleIdx,
opencv_ml.CvSVMParams params) |
boolean |
opencv_ml.CvSVM.train_auto(opencv_core.Mat trainData,
opencv_core.Mat responses,
opencv_core.Mat varIdx,
opencv_core.Mat sampleIdx,
opencv_ml.CvSVMParams params,
int k_fold,
opencv_ml.CvParamGrid Cgrid,
opencv_ml.CvParamGrid gammaGrid,
opencv_ml.CvParamGrid pGrid,
opencv_ml.CvParamGrid nuGrid,
opencv_ml.CvParamGrid coeffGrid,
opencv_ml.CvParamGrid degreeGrid,
boolean balanced) |
boolean |
opencv_legacy.CvEM.train(opencv_core.Mat samples) |
boolean |
opencv_ml.EM.train(opencv_core.Mat samples) |
boolean |
opencv_ml.CvDTree.train(opencv_core.Mat trainData,
int tflag,
opencv_core.Mat responses) |
boolean |
opencv_ml.CvRTrees.train(opencv_core.Mat trainData,
int tflag,
opencv_core.Mat responses) |
boolean |
opencv_ml.CvERTrees.train(opencv_core.Mat trainData,
int tflag,
opencv_core.Mat responses) |
boolean |
opencv_ml.CvBoost.train(opencv_core.Mat trainData,
int tflag,
opencv_core.Mat responses) |
boolean |
opencv_ml.CvGBTrees.train(opencv_core.Mat trainData,
int tflag,
opencv_core.Mat responses) |
boolean |
opencv_ml.CvBoost.train(opencv_core.Mat trainData,
int tflag,
opencv_core.Mat responses,
opencv_core.Mat varIdx,
opencv_core.Mat sampleIdx,
opencv_core.Mat varType,
opencv_core.Mat missingDataMask,
opencv_ml.CvBoostParams params,
boolean update) |
boolean |
opencv_ml.CvDTree.train(opencv_core.Mat trainData,
int tflag,
opencv_core.Mat responses,
opencv_core.Mat varIdx,
opencv_core.Mat sampleIdx,
opencv_core.Mat varType,
opencv_core.Mat missingDataMask,
opencv_ml.CvDTreeParams params) |
boolean |
opencv_ml.CvGBTrees.train(opencv_core.Mat trainData,
int tflag,
opencv_core.Mat responses,
opencv_core.Mat varIdx,
opencv_core.Mat sampleIdx,
opencv_core.Mat varType,
opencv_core.Mat missingDataMask,
opencv_ml.CvGBTreesParams params,
boolean update) |
boolean |
opencv_ml.CvRTrees.train(opencv_core.Mat trainData,
int tflag,
opencv_core.Mat responses,
opencv_core.Mat varIdx,
opencv_core.Mat sampleIdx,
opencv_core.Mat varType,
opencv_core.Mat missingDataMask,
opencv_ml.CvRTParams params) |
boolean |
opencv_ml.CvERTrees.train(opencv_core.Mat trainData,
int tflag,
opencv_core.Mat responses,
opencv_core.Mat varIdx,
opencv_core.Mat sampleIdx,
opencv_core.Mat varType,
opencv_core.Mat missingDataMask,
opencv_ml.CvRTParams params) |
boolean |
opencv_ml.CvNormalBayesClassifier.train(opencv_core.Mat trainData,
opencv_core.Mat responses) |
boolean |
opencv_ml.CvKNearest.train(opencv_core.Mat trainData,
opencv_core.Mat responses) |
boolean |
opencv_ml.CvSVM.train(opencv_core.Mat trainData,
opencv_core.Mat responses) |
int |
opencv_ml.CvANN_MLP.train(opencv_core.Mat inputs,
opencv_core.Mat outputs,
opencv_core.Mat sampleWeights) |
boolean |
opencv_ml.CvKNearest.train(opencv_core.Mat trainData,
opencv_core.Mat responses,
opencv_core.Mat sampleIdx,
boolean isRegression,
int maxK,
boolean updateBase) |
boolean |
opencv_ml.EM.train(opencv_core.Mat samples,
opencv_core.Mat logLikelihoods,
opencv_core.Mat labels,
opencv_core.Mat probs) |
boolean |
opencv_ml.CvNormalBayesClassifier.train(opencv_core.Mat trainData,
opencv_core.Mat responses,
opencv_core.Mat varIdx,
opencv_core.Mat sampleIdx,
boolean update) |
int |
opencv_ml.CvANN_MLP.train(opencv_core.Mat inputs,
opencv_core.Mat outputs,
opencv_core.Mat sampleWeights,
opencv_core.Mat sampleIdx,
opencv_ml.CvANN_MLP_TrainParams params,
int flags) |
boolean |
opencv_ml.CvSVM.train(opencv_core.Mat trainData,
opencv_core.Mat responses,
opencv_core.Mat varIdx,
opencv_core.Mat sampleIdx,
opencv_ml.CvSVMParams params) |
boolean |
opencv_legacy.CvEM.train(opencv_core.Mat samples,
opencv_core.Mat sampleIdx,
opencv_legacy.CvEMParams params,
opencv_core.Mat labels) |
void |
opencv_contrib.FaceRecognizer.train(opencv_core.MatVector src,
opencv_core.Mat labels)
Trains the FaceRecognizer with the given images and corresponding labels.
|
boolean |
opencv_ml.EM.trainE(opencv_core.Mat samples,
opencv_core.Mat means0) |
boolean |
opencv_ml.EM.trainE(opencv_core.Mat samples,
opencv_core.Mat means0,
opencv_core.Mat covs0,
opencv_core.Mat weights0,
opencv_core.Mat logLikelihoods,
opencv_core.Mat labels,
opencv_core.Mat probs) |
void |
opencv_legacy.FernClassifier.trainFromSingleView(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints) |
void |
opencv_legacy.FernClassifier.trainFromSingleView(opencv_core.Mat image,
opencv_features2d.KeyPoint keypoints,
int _patchSize,
int _signatureSize,
int _nstructs,
int _structSize,
int _nviews,
int _compressionMethod,
opencv_legacy.PatchGenerator patchGenerator) |
boolean |
opencv_ml.EM.trainM(opencv_core.Mat samples,
opencv_core.Mat probs0) |
boolean |
opencv_ml.EM.trainM(opencv_core.Mat samples,
opencv_core.Mat probs0,
opencv_core.Mat logLikelihoods,
opencv_core.Mat labels,
opencv_core.Mat probs) |
static void |
opencv_core.transform(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat m)
performs affine transformation of each element of multi-channel input matrix
|
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.transitionMatrix(opencv_core.Mat transitionMatrix) |
static void |
opencv_core.transpose(opencv_core.Mat src,
opencv_core.Mat dst)
transposes the matrix
|
static void |
opencv_calib3d.triangulatePoints(opencv_core.Mat projMatr1,
opencv_core.Mat projMatr2,
opencv_core.Mat projPoints1,
opencv_core.Mat projPoints2,
opencv_core.Mat points4D) |
opencv_core.SVD |
opencv_core.SVD.u(opencv_core.Mat u) |
static void |
opencv_imgproc.undistort(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs) |
static void |
opencv_imgproc.undistort(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat newCameraMatrix)
corrects lens distortion for the given camera matrix and distortion coefficients
|
static void |
opencv_imgproc.undistortPoints(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs) |
static void |
opencv_imgproc.undistortPoints(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat R,
opencv_core.Mat P)
returns points' coordinates after lens distortion correction
|
void |
opencv_contrib.FaceRecognizer.update(opencv_core.MatVector src,
opencv_core.Mat labels) |
static void |
opencv_video.updateMotionHistory(opencv_core.Mat silhouette,
opencv_core.Mat mhi,
double timestamp,
double duration)
updates motion history image using the current silhouette
|
void |
opencv_contrib.CvHybridTracker.updateTracker(opencv_core.Mat image) |
opencv_core.RotatedRect |
opencv_contrib.CvMeanShiftTracker.updateTrackingWindow(opencv_core.Mat image) |
opencv_core.Rect |
opencv_contrib.CvFeatureTracker.updateTrackingWindow(opencv_core.Mat image) |
opencv_core.Rect |
opencv_contrib.CvFeatureTracker.updateTrackingWindowWithFlow(opencv_core.Mat image) |
opencv_core.Rect |
opencv_contrib.CvFeatureTracker.updateTrackingWindowWithSIFT(opencv_core.Mat image) |
static void |
opencv_calib3d.validateDisparity(opencv_core.Mat disparity,
opencv_core.Mat cost,
int minDisparity,
int numberOfDisparities) |
static void |
opencv_calib3d.validateDisparity(opencv_core.Mat disparity,
opencv_core.Mat cost,
int minDisparity,
int numberOfDisparities,
int disp12MaxDisp)
validates disparity using the left-right check.
|
static void |
opencv_core.vconcat(opencv_core.Mat src,
long nsrc,
opencv_core.Mat dst) |
static void |
opencv_core.vconcat(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.vconcat(opencv_core.MatVector src,
opencv_core.Mat dst) |
opencv_core.SVD |
opencv_core.SVD.vt(opencv_core.Mat vt) |
opencv_core.SVD |
opencv_core.SVD.w(opencv_core.Mat w) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarperGpu.warp(opencv_core.GpuMat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.GpuMat dst) |
opencv_core.Point |
opencv_stitching.DetailSphericalWarperGpu.warp(opencv_core.GpuMat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.GpuMat dst) |
opencv_core.Point |
opencv_stitching.DetailCylindricalWarperGpu.warp(opencv_core.GpuMat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.GpuMat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarperGpu.warp(opencv_core.GpuMat src,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
int interp_mode,
int border_mode,
opencv_core.GpuMat dst) |
opencv_core.Point |
opencv_stitching.RotationWarper.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarperGpu.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailSphericalWarperGpu.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailCylindricalWarperGpu.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarper.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarperGpu.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
static void |
opencv_imgproc.warpAffine(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize) |
static void |
opencv_imgproc.warpAffine(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize,
int flags,
int borderMode,
opencv_core.Scalar borderValue)
warps the image using affine transformation
|
void |
opencv_stitching.RotationWarper.warpBackward(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Size dst_size,
opencv_core.Mat dst) |
static void |
opencv_imgproc.warpPerspective(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize) |
static void |
opencv_imgproc.warpPerspective(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize,
int flags,
int borderMode,
opencv_core.Scalar borderValue)
warps the image using perspective transformation
|
opencv_core.Point2f |
opencv_stitching.RotationWarper.warpPoint(opencv_core.Point2f pt,
opencv_core.Mat K,
opencv_core.Mat R) |
opencv_core.Point2f |
opencv_stitching.DetailPlaneWarper.warpPoint(opencv_core.Point2f pt,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T) |
opencv_core.Rect |
opencv_stitching.RotationWarper.warpRoi(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.warpRoi(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T) |
void |
opencv_legacy.PatchGenerator.warpWholeImage(opencv_core.Mat image,
opencv_core.Mat matT,
opencv_core.Mat buf,
opencv_core.Mat warped,
int border,
opencv_core.RNG rng) |
static void |
opencv_imgproc.watershed(opencv_core.Mat image,
opencv_core.Mat markers)
segments the image using watershed algorithm
|
void |
opencv_highgui.VideoWriter.write(opencv_core.Mat image) |
| Modifier and Type | Field and Description |
|---|---|
static opencv_core.Mat |
opencv_core.AbstractMat.EMPTY |
Copyright © 2014. All Rights Reserved.