【发布时间】:2013-07-14 08:22:57
【问题描述】:
我对 getPerspectiveTransform 的参数有点困惑,因为我看不到正确的图像。这是我的代码。 original_image 变量是包含我想要裁剪并创建新图像的方形对象(和其他一些对象)的图像(类似于Android OpenCV Find Largest Square or Rectangle)。变量 p1、p2、p3 和 p4 是图像中最大正方形/矩形角的坐标。 p1为左上,p2为右上,p3为右下,p4为左下(顺时针分配)。
Mat src = new Mat(4,1,CvType.CV_32FC2);
src.put((int)p1.y,(int)p1.x, (int)p2.y,(int)p2.x, (int)p4.y,(int)p4.x, (int)p3.y,(int)p3.x);
Mat dst = new Mat(4,1,CvType.CV_32FC2);
dst.put(0,0, 0,original_image.width(), original_image.height(),original_image.width(), original_image.height(),0);
Mat perspectiveTransform = Imgproc.getPerspectiveTransform(src, dst);
Mat cropped_image = original_image.clone();
Imgproc.warpPerspective(untouched, cropped_image, perspectiveTransform, new Size(512,512));
当我尝试显示cropped_image 时,我得到一个“我不知道它是什么”的图像。我认为我在 getPerspectiveTransform() 中的参数不正确(或者是这样)。请帮忙。谢谢!
更新:当我调试我的代码时,我发现我的正方形/矩形的边缘不正确,除了 p4 之外,有些是完全正确的。这是我检测图像中正方形或矩形边缘的代码。除了具有白色轮廓的最大正方形/矩形的轮廓外,我的图像都是黑色的。
//we will find the edges of the new_image (corners of the square/rectangle)
Point p1 = new Point(10000, 10000); //upper left; minX && minY
Point p2 = new Point(0, 10000); //upper right; maxX && minY
Point p3 = new Point(0, 0); //lower right; maxX && maxY
Point p4 = new Point(10000, 0); //lower left; minX && maxY
double[] temp_pixel_color;
for (int x=0; x<new_image.rows(); x++) {
for (int y=0; y<new_image.cols(); y++) {
temp_pixel_color = new_image.get(x, y); //we have a black and white image so we only have one color channel
if (temp_pixel_color[0] > 200) { //we found a white pixel
if (x<=p1.x && y<=p1.y) { //for p1, minX && minY
p1.x = x;
p1.y = y;
}
else if (x>=p2.x && y<=p2.y) { //for p2, maxX && minY
p2.x = x;
p2.y = y;
}
else if (x>=p3.x && y>=p3.y) { //for p3, maxX && maxY
p3.x = x;
p3.y = y;
}
else if (x<=(int)p4.x && y>=(int)p4.y) { //for p4, minX && maxY
p4.x = x;
p4.y = y;
}
}
}
}
这是我的示例图片。在检测到边缘后绘制彩色圆圈时忽略它们:
更新:2013 年 7 月 16 日 我现在可以仅使用最大 4 点轮廓的 approxCurve 来检测拐角。这是我的代码:
private Mat findLargestRectangle(Mat original_image) {
Mat imgSource = original_image;
//Mat untouched = original_image.clone();
//convert the image to black and white
Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BGR2GRAY);
//convert the image to black and white does (8 bit)
Imgproc.Canny(imgSource, imgSource, 50, 50);
//apply gaussian blur to smoothen lines of dots
Imgproc.GaussianBlur(imgSource, imgSource, new Size(5, 5), 5);
//find the contours
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(imgSource, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
double maxArea = -1;
int maxAreaIdx = -1;
MatOfPoint temp_contour = contours.get(0); //the largest is at the index 0 for starting point
MatOfPoint2f approxCurve = new MatOfPoint2f();
MatOfPoint2f maxCurve = new MatOfPoint2f();
List<MatOfPoint> largest_contours = new ArrayList<MatOfPoint>();
for (int idx = 0; idx < contours.size(); idx++) {
temp_contour = contours.get(idx);
double contourarea = Imgproc.contourArea(temp_contour);
//compare this contour to the previous largest contour found
if (contourarea > maxArea) {
//check if this contour is a square
MatOfPoint2f new_mat = new MatOfPoint2f( temp_contour.toArray() );
int contourSize = (int)temp_contour.total();
Imgproc.approxPolyDP(new_mat, approxCurve, contourSize*0.05, true);
if (approxCurve.total() == 4) {
maxCurve = approxCurve;
maxArea = contourarea;
maxAreaIdx = idx;
largest_contours.add(temp_contour);
}
}
}
//create the new image here using the largest detected square
Mat new_image = new Mat(imgSource.size(), CvType.CV_8U); //we will create a new black blank image with the largest contour
Imgproc.cvtColor(new_image, new_image, Imgproc.COLOR_BayerBG2RGB);
Imgproc.drawContours(new_image, contours, maxAreaIdx, new Scalar(255, 255, 255), 1); //will draw the largest square/rectangle
double temp_double[] = maxCurve.get(0, 0);
Point p1 = new Point(temp_double[0], temp_double[1]);
Core.circle(new_image, new Point(p1.x, p1.y), 20, new Scalar(255, 0, 0), 5); //p1 is colored red
String temp_string = "Point 1: (" + p1.x + ", " + p1.y + ")";
temp_double = maxCurve.get(1, 0);
Point p2 = new Point(temp_double[0], temp_double[1]);
Core.circle(new_image, new Point(p2.x, p2.y), 20, new Scalar(0, 255, 0), 5); //p2 is colored green
temp_string += "\nPoint 2: (" + p2.x + ", " + p2.y + ")";
temp_double = maxCurve.get(2, 0);
Point p3 = new Point(temp_double[0], temp_double[1]);
Core.circle(new_image, new Point(p3.x, p3.y), 20, new Scalar(0, 0, 255), 5); //p3 is colored blue
temp_string += "\nPoint 3: (" + p3.x + ", " + p3.y + ")";
temp_double = maxCurve.get(3, 0);
Point p4 = new Point(temp_double[0], temp_double[1]);
Core.circle(new_image, new Point(p4.x, p4.y), 20, new Scalar(0, 255, 255), 5); //p1 is colored violet
temp_string += "\nPoint 4: (" + p4.x + ", " + p4.y + ")";
TextView temp_text = (TextView)findViewById(R.id.temp_text);
temp_text.setText(temp_string);
return new_image;
}
这是示例结果图像:
我已经为正方形/矩形的角画了圆圈,我还添加了一个文本视图来显示所有四个点。
【问题讨论】:
-
好的,有时间我会检查你的代码,但在那之前我会告诉你一个更简单的方法。如果你使用 findContours 和方法 "CV_CHAIN_APPROX_SIMPLE",你可以马上得到角点。
-
另外,那些不是“边缘”,那些是“角”:)
-
您确定“src”和“dst”创建正确吗?我认为使用“Point2f”数组而不是 Mat 会更好。
-
另外,不要寻找“白色”像素。只需使用轮廓。检查所有轮廓点,保存具有最小 x 的点。
-
感谢 Baci... 我认为 Java/Android 没有 Point2f 数据类型。在 Java 或 Android 中, Imgproc.getPerspectiveTransform() 接受两个 Mat 参数。我错过了什么吗?再次感谢
标签: android opencv transformation warp