| Package | Description |
|---|---|
| org.bytedeco.javacpp |
| Modifier and Type | Method and Description |
|---|---|
opencv_core.Point |
opencv_imgproc.BaseFilter.anchor() |
opencv_core.Point |
opencv_imgproc.FilterEngine.anchor() |
opencv_core.Point |
opencv_core.Rect.br()
the bottom-right corner
|
opencv_core.Point |
opencv_core.PointVectorVector.get(long i,
long j) |
opencv_core.Point |
opencv_objdetect.DetectionROI.locations() |
opencv_core.Point |
opencv_core.LineIterator.pos()
returns coordinates of the current pixel
|
opencv_core.Point |
opencv_core.MatConstIterator.pos()
returns the current iterator position
|
opencv_core.Point |
opencv_core.Point.position(int position) |
opencv_core.Point |
opencv_core.Point.put(opencv_core.Point pt) |
static opencv_core.Point |
opencv_stitching.resultTl(opencv_core.Point corners) |
opencv_core.Point |
opencv_core.Rect.tl()
the top-left corner
|
opencv_core.Point |
opencv_stitching.DetailPlaneWarperGpu.warp(opencv_core.GpuMat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.GpuMat dst) |
opencv_core.Point |
opencv_stitching.DetailSphericalWarperGpu.warp(opencv_core.GpuMat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.GpuMat dst) |
opencv_core.Point |
opencv_stitching.DetailCylindricalWarperGpu.warp(opencv_core.GpuMat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.GpuMat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarperGpu.warp(opencv_core.GpuMat src,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
int interp_mode,
int border_mode,
opencv_core.GpuMat dst) |
opencv_core.Point |
opencv_stitching.RotationWarper.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarperGpu.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailSphericalWarperGpu.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailCylindricalWarperGpu.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarper.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarperGpu.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_core.Point.x(int x) |
opencv_core.Point |
opencv_core.Point.y(int y) |
| Modifier and Type | Method and Description |
|---|---|
static void |
opencv_imgproc.adaptiveBilateralFilter(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize,
double sigmaSpace,
double maxSigmaColor,
opencv_core.Point anchor,
int borderType)
smooths the image using an adaptive bilateral filter
|
static void |
opencv_highgui.addText(opencv_core.Mat img,
BytePointer text,
opencv_core.Point org,
opencv_core.CvFont font) |
static void |
opencv_highgui.addText(opencv_core.Mat img,
String text,
opencv_core.Point org,
opencv_core.CvFont font) |
opencv_imgproc.BaseFilter |
opencv_imgproc.BaseFilter.anchor(opencv_core.Point anchor) |
opencv_imgproc.FilterEngine |
opencv_imgproc.FilterEngine.anchor(opencv_core.Point anchor) |
void |
opencv_stitching.ExposureCompensator.apply(int index,
opencv_core.Point corner,
opencv_core.Mat image,
opencv_core.Mat mask) |
void |
opencv_stitching.NoExposureCompensator.apply(int arg0,
opencv_core.Point arg1,
opencv_core.Mat arg2,
opencv_core.Mat arg3) |
void |
opencv_stitching.GainCompensator.apply(int index,
opencv_core.Point corner,
opencv_core.Mat image,
opencv_core.Mat mask) |
void |
opencv_stitching.BlocksGainCompensator.apply(int index,
opencv_core.Point corner,
opencv_core.Mat image,
opencv_core.Mat mask) |
void |
opencv_imgproc.FilterEngine.apply(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Rect srcRoi,
opencv_core.Point dstOfs,
boolean isolated)
applies filter to the specified ROI of the image.
|
static void |
opencv_imgproc.blur(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize,
opencv_core.Point anchor,
int borderType)
a synonym for normalized box filter
|
static void |
opencv_imgproc.boxFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Size ksize,
opencv_core.Point anchor,
boolean normalize,
int borderType)
smooths the image using the box filter.
|
static boolean |
opencv_core.checkRange(opencv_core.Mat a,
boolean quiet,
opencv_core.Point pos,
double minVal,
double maxVal)
checks that each matrix element is within the specified range.
|
static void |
opencv_core.circle(opencv_core.Mat img,
opencv_core.Point center,
int radius,
opencv_core.Scalar color) |
static void |
opencv_core.circle(opencv_core.Mat img,
opencv_core.Point center,
int radius,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
draws the circle outline or a solid circle in the image
|
static boolean |
opencv_core.clipLine(opencv_core.Rect imgRect,
opencv_core.Point pt1,
opencv_core.Point pt2)
clips the line segment by the rectangle imgRect
|
static boolean |
opencv_core.clipLine(opencv_core.Size imgSize,
opencv_core.Point pt1,
opencv_core.Point pt2)
clips the line segment by the rectangle Rect(0, 0, imgSize.width, imgSize.height)
|
void |
opencv_contrib.SelfSimDescriptor.compute(opencv_core.Mat img,
float[] descriptors,
opencv_core.Size winStride,
opencv_core.Point locations) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
float[] descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.Point locations) |
void |
opencv_contrib.SelfSimDescriptor.compute(opencv_core.Mat img,
FloatBuffer descriptors,
opencv_core.Size winStride,
opencv_core.Point locations) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
FloatBuffer descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.Point locations) |
void |
opencv_contrib.SelfSimDescriptor.compute(opencv_core.Mat img,
FloatPointer descriptors,
opencv_core.Size winStride,
opencv_core.Point locations) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
FloatPointer descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.Point locations) |
boolean |
opencv_core.Rect.contains(opencv_core.Point pt)
checks whether the rectangle contains the point
|
static opencv_imgproc.FilterEngine |
opencv_imgproc.createBoxFilter(int srcType,
int dstType,
opencv_core.Size ksize,
opencv_core.Point anchor,
boolean normalize,
int borderType)
returns box filter engine
|
static opencv_imgproc.FilterEngine |
opencv_imgproc.createLinearFilter(int srcType,
int dstType,
opencv_core.Mat kernel,
opencv_core.Point _anchor,
double delta,
int rowBorderType,
int columnBorderType,
opencv_core.Scalar borderValue)
returns the non-separable linear filter engine
|
static opencv_imgproc.FilterEngine |
opencv_imgproc.createMorphologyFilter(int op,
int type,
opencv_core.Mat kernel,
opencv_core.Point anchor,
int rowBorderType,
int columnBorderType,
opencv_core.Scalar borderValue)
returns morphological filter engine.
|
static opencv_imgproc.FilterEngine |
opencv_imgproc.createSeparableLinearFilter(int srcType,
int dstType,
opencv_core.Mat rowKernel,
opencv_core.Mat columnKernel,
opencv_core.Point anchor,
double delta,
int rowBorderType,
int columnBorderType,
opencv_core.Scalar borderValue)
returns the separable linear filter engine
|
opencv_core.Rect |
opencv_stitching.FeatherBlender.createWeightMaps(opencv_core.MatVector masks,
opencv_core.Point corners,
opencv_core.MatVector weight_maps) |
double |
opencv_core.Point.cross(opencv_core.Point pt)
cross-product
|
double |
opencv_core.Point.ddot(opencv_core.Point pt)
dot product computed in double-precision arithmetic
|
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations,
double[] weights) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations,
double[] weights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.Point searchLocations) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations,
DoubleBuffer weights) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations,
DoubleBuffer weights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.Point searchLocations) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.Point searchLocations) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations,
DoublePointer weights) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.Point foundLocations,
DoublePointer weights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.Point searchLocations) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.Point locations,
opencv_core.Point foundLocations,
double[] confidences) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.Point locations,
opencv_core.Point foundLocations,
double[] confidences,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.Point locations,
opencv_core.Point foundLocations,
DoubleBuffer confidences) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.Point locations,
opencv_core.Point foundLocations,
DoubleBuffer confidences,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.Point locations,
opencv_core.Point foundLocations,
DoublePointer confidences) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.Point locations,
opencv_core.Point foundLocations,
DoublePointer confidences,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding) |
static void |
opencv_imgproc.dilate(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat kernel,
opencv_core.Point anchor,
int iterations,
int borderType,
opencv_core.Scalar borderValue)
dilates the image (applies the local maximum operator)
|
int |
opencv_core.Point.dot(opencv_core.Point pt)
dot product
|
static void |
opencv_imgproc.drawContours(opencv_core.Mat image,
opencv_core.MatVector contours,
int contourIdx,
opencv_core.Scalar color,
int thickness,
int lineType,
opencv_core.Mat hierarchy,
int maxLevel,
opencv_core.Point offset)
draws contours in the image
|
static void |
opencv_core.ellipse(opencv_core.Mat img,
opencv_core.Point center,
opencv_core.Size axes,
double angle,
double startAngle,
double endAngle,
opencv_core.Scalar color) |
static void |
opencv_core.ellipse(opencv_core.Mat img,
opencv_core.Point center,
opencv_core.Size axes,
double angle,
double startAngle,
double endAngle,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
draws an elliptic arc, ellipse sector or a rotated ellipse in the image
|
static void |
opencv_core.ellipse2Poly(opencv_core.Point center,
opencv_core.Size axes,
int angle,
int arcStart,
int arcEnd,
int delta,
opencv_core.Point pts)
converts elliptic arc to a polygonal curve
|
static void |
opencv_imgproc.erode(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat kernel,
opencv_core.Point anchor,
int iterations,
int borderType,
opencv_core.Scalar borderValue)
erodes the image (applies the local minimum operator)
|
void |
opencv_stitching.Blender.feed(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Point tl) |
void |
opencv_stitching.FeatherBlender.feed(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Point tl) |
void |
opencv_stitching.MultiBandBlender.feed(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Point tl) |
void |
opencv_stitching.ExposureCompensator.feed(opencv_core.Point corners,
opencv_core.MatVector images,
opencv_core.MatVector masks) |
void |
opencv_stitching.ExposureCompensator.feed(opencv_core.Point corners,
opencv_core.MatVector images,
opencv_stitching.MatBytePairVector masks) |
void |
opencv_stitching.NoExposureCompensator.feed(opencv_core.Point arg0,
opencv_core.MatVector arg1,
opencv_stitching.MatBytePairVector arg2) |
void |
opencv_stitching.GainCompensator.feed(opencv_core.Point corners,
opencv_core.MatVector images,
opencv_stitching.MatBytePairVector masks) |
void |
opencv_stitching.BlocksGainCompensator.feed(opencv_core.Point corners,
opencv_core.MatVector images,
opencv_stitching.MatBytePairVector masks) |
static void |
opencv_core.fillConvexPoly(opencv_core.Mat img,
opencv_core.Point pts,
int npts,
opencv_core.Scalar color) |
static void |
opencv_core.fillConvexPoly(opencv_core.Mat img,
opencv_core.Point pts,
int npts,
opencv_core.Scalar color,
int lineType,
int shift)
draws a filled convex polygon in the image
|
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.MatVector pts,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
int[] npts,
int ncontours,
opencv_core.Scalar color) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
int[] npts,
int ncontours,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
IntBuffer npts,
int ncontours,
opencv_core.Scalar color) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
IntBuffer npts,
int ncontours,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
IntPointer npts,
int ncontours,
opencv_core.Scalar color) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
IntPointer npts,
int ncontours,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
static void |
opencv_core.fillPoly(opencv_core.Mat img,
PointerPointer pts,
IntPointer npts,
int ncontours,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset)
fills an area bounded by one or more polygons
|
static void |
opencv_imgproc.filter2D(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Mat kernel,
opencv_core.Point anchor,
double delta,
int borderType)
applies non-separable 2D linear filter to the image
|
void |
opencv_stitching.SeamFinder.find(opencv_core.MatVector src,
opencv_core.Point corners,
opencv_core.MatVector masks) |
void |
opencv_stitching.NoSeamFinder.find(opencv_core.MatVector arg0,
opencv_core.Point arg1,
opencv_core.MatVector arg2) |
void |
opencv_stitching.PairwiseSeamFinder.find(opencv_core.MatVector src,
opencv_core.Point corners,
opencv_core.MatVector masks) |
void |
opencv_stitching.DpSeamFinder.find(opencv_core.MatVector src,
opencv_core.Point corners,
opencv_core.MatVector masks) |
void |
opencv_stitching.GraphCutSeamFinder.find(opencv_core.MatVector src,
opencv_core.Point corners,
opencv_core.MatVector masks) |
void |
opencv_stitching.GraphCutSeamFinderGpu.find(opencv_core.MatVector src,
opencv_core.Point corners,
opencv_core.MatVector masks) |
void |
opencv_stitching.VoronoiSeamFinder.find(opencv_core.Size size,
opencv_core.Point corners,
opencv_core.MatVector masks) |
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.MatVector contours,
int mode,
int method,
opencv_core.Point offset)
retrieves contours from a black-and-white image.
|
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.MatVector contours,
opencv_core.Mat hierarchy,
int mode,
int method,
opencv_core.Point offset)
retrieves contours and the hierarchical information from a black-and-white image.
|
static int |
opencv_imgproc.floodFill(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal) |
static int |
opencv_imgproc.floodFill(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal,
opencv_core.Rect rect,
opencv_core.Scalar loDiff,
opencv_core.Scalar upDiff,
int flags)
fills the semi-uniform image region and/or the mask starting from the specified seed point
|
static int |
opencv_imgproc.floodFill(opencv_core.Mat image,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal) |
static int |
opencv_imgproc.floodFill(opencv_core.Mat image,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal,
opencv_core.Rect rect,
opencv_core.Scalar loDiff,
opencv_core.Scalar upDiff,
int flags)
fills the semi-uniform image region starting from the specified seed point
|
static int |
opencv_imgproc.getKernelType(opencv_core.Mat kernel,
opencv_core.Point anchor)
returns type (one of KERNEL_*) of 1D or 2D kernel specified by its coefficients.
|
static opencv_imgproc.BaseFilter |
opencv_imgproc.getLinearFilter(int srcType,
int dstType,
opencv_core.Mat kernel,
opencv_core.Point anchor,
double delta,
int bits)
returns 2D filter with the specified kernel
|
static opencv_imgproc.BaseFilter |
opencv_imgproc.getMorphologyFilter(int op,
int type,
opencv_core.Mat kernel,
opencv_core.Point anchor)
returns 2D morphological filter
|
static opencv_core.Mat |
opencv_imgproc.getStructuringElement(int shape,
opencv_core.Size ksize,
opencv_core.Point anchor)
returns structuring element of the specified shape and size
|
static void |
opencv_core.line(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color) |
static void |
opencv_core.line(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
draws the line segment (pt1, pt2) in the image
|
void |
opencv_core.Mat.locateROI(opencv_core.Size wholeSize,
opencv_core.Point ofs)
locates matrix header within a parent matrix.
|
opencv_objdetect.DetectionROI |
opencv_objdetect.DetectionROI.locations(opencv_core.Point locations) |
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
double[] minVal,
double[] maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc,
opencv_core.Mat mask) |
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
DoubleBuffer minVal,
DoubleBuffer maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc,
opencv_core.Mat mask) |
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
DoublePointer minVal,
DoublePointer maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc,
opencv_core.Mat mask)
finds global minimum and maximum array elements and returns their values and their locations
|
static void |
opencv_imgproc.morphologyEx(opencv_core.Mat src,
opencv_core.Mat dst,
int op,
opencv_core.Mat kernel,
opencv_core.Point anchor,
int iterations,
int borderType,
opencv_core.Scalar borderValue)
applies an advanced morphological operation to the image
|
static boolean |
opencv_stitching.overlapRoi(opencv_core.Point tl1,
opencv_core.Point tl2,
opencv_core.Size sz1,
opencv_core.Size sz2,
opencv_core.Rect roi) |
static void |
opencv_core.polylines(opencv_core.Mat img,
opencv_core.Point pts,
int[] npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_core.polylines(opencv_core.Mat img,
opencv_core.Point pts,
int[] npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_core.polylines(opencv_core.Mat img,
opencv_core.Point pts,
IntBuffer npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_core.polylines(opencv_core.Mat img,
opencv_core.Point pts,
IntBuffer npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_core.polylines(opencv_core.Mat img,
opencv_core.Point pts,
IntPointer npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_core.polylines(opencv_core.Mat img,
opencv_core.Point pts,
IntPointer npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
void |
opencv_stitching.Blender.prepare(opencv_core.Point corners,
opencv_core.Size sizes) |
opencv_core.PointVectorVector |
opencv_core.PointVectorVector.put(long i,
long j,
opencv_core.Point value) |
opencv_core.Point |
opencv_core.Point.put(opencv_core.Point pt) |
opencv_core.PointVectorVector |
opencv_core.PointVectorVector.put(opencv_core.Point[]... array) |
static void |
opencv_core.putText(opencv_core.Mat img,
BytePointer text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color) |
static void |
opencv_core.putText(opencv_core.Mat img,
BytePointer text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color,
int thickness,
int lineType,
boolean bottomLeftOrigin)
renders text string in the image
|
static void |
opencv_core.putText(opencv_core.Mat img,
String text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color) |
static void |
opencv_core.putText(opencv_core.Mat img,
String text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color,
int thickness,
int lineType,
boolean bottomLeftOrigin) |
static void |
opencv_core.rectangle(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color) |
static void |
opencv_core.rectangle(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image
|
static opencv_core.Rect |
opencv_stitching.resultRoi(opencv_core.Point corners,
opencv_core.MatVector images) |
static opencv_core.Rect |
opencv_stitching.resultRoi(opencv_core.Point corners,
opencv_core.Size sizes) |
static opencv_core.Point |
opencv_stitching.resultTl(opencv_core.Point corners) |
static void |
opencv_imgproc.sepFilter2D(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Mat kernelX,
opencv_core.Mat kernelY,
opencv_core.Point anchor,
double delta,
int borderType)
applies separable 2D linear filter to the image
|
void |
opencv_imgproc.GeneralizedHough.setTemplate(opencv_core.Mat templ,
int cannyThreshold,
opencv_core.Point templCenter)
set template to search
|
void |
opencv_imgproc.GeneralizedHough.setTemplate(opencv_core.Mat edges,
opencv_core.Mat dx,
opencv_core.Mat dy,
opencv_core.Point templCenter) |
boolean |
opencv_objdetect.FeatureEvaluator.setWindow(opencv_core.Point p) |
void |
opencv_contrib.SelfSimDescriptor.SSD(opencv_core.Mat img,
opencv_core.Point pt,
opencv_core.Mat ssd) |
| Constructor and Description |
|---|
opencv_contrib.LogPolar_Adjacent(int w,
int h,
opencv_core.Point center) |
opencv_contrib.LogPolar_Adjacent(int w,
int h,
opencv_core.Point center,
int R,
double ro0,
double smin,
int full,
int S,
int sp)
Constructor
\param w the width of the input image
\param h the height of the input image
\param center the transformation center: where the output precision is maximal
\param R the number of rings of the cortical image (default value 70 pixel)
\param ro0 the radius of the blind spot (default value 3 pixel)
\param smin the size of the subpixel (default value 0.25 pixel)
\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
|
opencv_contrib.LogPolar_Interp(int w,
int h,
opencv_core.Point center) |
opencv_contrib.LogPolar_Interp(int w,
int h,
opencv_core.Point center,
int R,
double ro0,
int interp,
int full,
int S,
int sp)
Constructor
\param w the width of the input image
\param h the height of the input image
\param center the transformation center: where the output precision is maximal
\param R the number of rings of the cortical image (default value 70 pixel)
\param ro0 the radius of the blind spot (default value 3 pixel)
\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
|
opencv_contrib.LogPolar_Overlapping(int w,
int h,
opencv_core.Point center) |
opencv_contrib.LogPolar_Overlapping(int w,
int h,
opencv_core.Point center,
int R,
double ro0,
int full,
int S,
int sp)
Constructor
\param w the width of the input image
\param h the height of the input image
\param center the transformation center: where the output precision is maximal
\param R the number of rings of the cortical image (default value 70 pixel)
\param ro0 the radius of the blind spot (default value 3 pixel)
\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
|
opencv_core.LineIterator(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2) |
opencv_core.LineIterator(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
int connectivity,
boolean leftToRight)
initializes the iterator
|
opencv_core.MatConstIterator(opencv_core.Mat _m,
opencv_core.Point _pt)
constructor that sets the iterator to the specified element of the matrix
|
opencv_core.Point(opencv_core.Point pt) |
opencv_core.Point3i(opencv_core.Point pt) |
opencv_core.PointVectorVector(opencv_core.Point[]... array) |
opencv_core.Rect(opencv_core.Point pt1,
opencv_core.Point pt2) |
opencv_core.Rect(opencv_core.Point org,
opencv_core.Size sz) |
opencv_core.Size(opencv_core.Point pt) |
Copyright © 2014. All Rights Reserved.