OpenCV3+VS2017+单目标定+双目标定+双目测距
理论知识请参考《学习OpenCV中文版》(公式、函数描述方面可能有错误,注意一下;还有不要看《学习OpenCV3中文版》,可以看
《Learning OpenCV3》英文原版,有少许错误,注意一下)
下面直接上代码和结果:
说明:由于本人也是第一次接触摄像机,所以代码中注释比较多,也可能有错误,欢迎大家指出!
单目标定部分(矫正部分请自建文件夹)
/*
单目标定
参数:
imageList 存放标定图片名称的txt
singleCalibrateResult 存放标定结果的txt
objectPoints 世界坐标系中点的坐标
corners_seq 存放图像中的角点,用于立体标定
cameraMatrix 相机的内参数矩阵
distCoeffs 相机的畸变系数
imageSize 输入图像的尺寸(像素)
patternSize 标定板每行的角点个数, 标定板每列的角点个数 (9, 6)
chessboardSize 棋盘上每个方格的边长(mm)
注意:亚像素精确化时,允许输入单通道、8位或者浮点型图像。由于输入图像的类型不同,下面用作标定函数参数的内参数矩阵和畸变系数矩阵在初始化时也要注意数据类型。
*/
bool singleCameraCalibrate(const char* imageList, const char* singleCalibrateResult, const char* undisortion_path, vector<vector<Point3f>>& objectPoints,
vector<vector<Point2f>>& corners_seq, Mat& cameraMatrix, Mat& distCoeffs, Size& imageSize, Siz
e patternSize, Size chessboardSize)
{
int n_boards = 0;
ifstream imageStore(imageList); // 打开存放标定图⽚名称的txt
ofstream resultStore(singleCalibrateResult); // 保存标定结果的txt
// 开始提取⾓点坐标
vector<Point2f> corners; // 存放⼀张图⽚的⾓点坐标
string imageName; // 读取的标定图⽚的名称
while (getline(imageStore, imageName)) // 读取txt的每⼀⾏(每⼀⾏存放了⼀张标定图⽚的名称)
{
n_boards++;
Mat imageInput = imread(imageName);
cvtColor(imageInput, imageInput, CV_RGB2GRAY);
imageSize.width = ls; // 获取图⽚的宽度
imageSize.height = ws; // 获取图⽚的⾼度
// 查标定板的⾓点
bool found = findChessboardCorners(imageInput, patternSize, corners); // 最后⼀个参数int flags的缺省值为:CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NOR // 亚像素精确化。在findChessboardCorners中⾃动调⽤了cornerSubPix,为了更加精细化,我们⾃⼰再调⽤⼀次。
if (found) // 当所有的⾓点都被到
{
TermCriteria criteria = TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001); // 终⽌标准,迭代40次或者达到0.001的像素精度
cornerSubPix(imageInput, corners, Size(11, 11), Size(-1, -1), criteria);// 由于我们的图像只存较⼤,将搜索窗⼝调⼤⼀些,(11, 11)为真实窗⼝的⼀半,真实⼤ corners_seq.push_back(corners); // 存⼊⾓点序列
// 绘制⾓点
//drawChessboardCorners(imageInput, patternSize, corners, true);
//imshow("cornersframe", imageInput);
//waitKey(500); // 暂停0.5s
}
}
//destroyWindow("cornersframe");
// 进⾏相机标定
// 计算⾓点对应的三维坐标
int pic, i, j;
for (pic = 0; pic < n_boards; pic++)
{
vector<Point3f> realPointSet;
for (i = 0; i < patternSize.height; i++)
for (i = 0; i < patternSize.height; i++)
{
for (j = 0; j < patternSize.width; j++)
{
Point3f realPoint;
// 假设标定板位于世界坐标系Z=0的平⾯
realPoint.x = j * chessboardSize.width;
realPoint.y = i * chessboardSize.height;
realPoint.z = 0;
realPointSet.push_back(realPoint);
}
}
objectPoints.push_back(realPointSet);
}
// 执⾏标定程序
vector<Mat> rvec; // 旋转向量
vector<Mat> tvec; // 平移向量
calibrateCamera(objectPoints, corners_seq, imageSize, cameraMatrix, distCoeffs, rvec, tvec, 0);
// 保存标定结果
resultStore << "相机内参数矩阵" << endl;
resultStore << cameraMatrix << endl << endl;
resultStore << "相机畸变系数" << endl;
resultStore << distCoeffs << endl << endl;
// 计算重投影点,与原图⾓点⽐较,得到误差
double errPerImage = 0.; // 每张图像的误差
double errAverage = 0.; // 所有图像的平均误差
double totalErr = 0.; // 误差总和
vector<Point2f> projectImagePoints; // 重投影点
for (i = 0; i < n_boards; i++)
{
vector<Point3f> tempObjectPoints = objectPoints[i]; // 临时三维点
// 计算重投影点
projectPoints(tempObjectPoints, rvec[i], tvec[i], cameraMatrix, distCoeffs, projectImagePoints);
// 计算新的投影点与旧的投影点之间的误差
vector<Point2f> tempCornersPoints = corners_seq[i];// 临时存放旧投影点
Mat tempCornersPointsMat = Mat(1, tempCornersPoints.size(), CV_32FC2); // 定义成两个通道的Mat是为了计算误差
Mat projectImagePointsMat = Mat(1, projectImagePoints.size(), CV_32FC2);
// 赋值
for (int j = 0; j < tempCornersPoints.size(); j++)
{
projectImagePointsMat.at<Vec2f>(0, j) = Vec2f(projectImagePoints[j].x, projectImagePoints[j].y);
tempCornersPointsMat.at<Vec2f>(0, j) = Vec2f(tempCornersPoints[j].x, tempCornersPoints[j].y);
}
// opencv⾥的norm函数其实把这⾥的两个通道分别分开来计算的(X1-X2)^2的值,然后统⼀求和,最后进⾏根号
errPerImage = norm(tempCornersPointsMat, projectImagePointsMat, NORM_L2) / (patternSize.width * patternSize.height); totalErr += errPerImage;
resultStore << "第" << i + 1 << "张图像的平均误差为:" << errPerImage << endl;
}
resultStore << "全局平局误差为:" << totalErr / n_boards << endl;
imageStore.close();
resultStore.close();
// 进⾏畸变矫正
Mat undistortImage;
string imageRoute;
int k = 1;
ifstream imageStore_(imageList);
string imageName_;
while (getline(imageStore_, imageName_))
{
stringstream StrStm;
string temp;
Mat distortImage = imread(imageName_);
cvtColor(distortImage, distortImage, CV_RGB2GRAY);
undistortImage = distortImage.clone();
undistort(distortImage, undistortImage, cameraMatrix, distCoeffs);
StrStm << k++;
StrStm << k++;
StrStm >> temp;
imageRoute = undisortion_path + temp + ".jpg"; imwrite(imageRoute, undistortImage);
StrStm.clear();
imageRoute.clear();
}
cout << "已矫正!" << endl;
imageStore_.close();
return true;
}
结果:
双目标定部分(用到单目标定里的图像点和三维点,所以直接调用立体标定函数)
/*
双目标定:计算两摄像机相对旋转矩阵 R,平移向量 T,本征矩阵 E,基础矩阵 F
参数:
stereoCalibrateResult 存放立体标定结果的txt
objectPoints 三维点
imagePoints 二维图像上的点
cameraMatrix 相机内参数
distCoeffs 相机畸变系数
imageSize 图像尺寸
R 左右相机相对的旋转矩阵
T 左右相机相对的平移向量
E 本征矩阵
F 基础矩阵
*/
bool stereoCalibrate(const char* stereoCalibrateResult, vector<vector<Point3f>> objectPoints, vector<vector<Point2f>> imagePoints1, vector<vector<Point2f>> im Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size& imageSize, Mat& R, Mat& T, Mat& E, Mat& F)
{
ofstream stereoStore(stereoCalibrateResult);
TermCriteria criteria = TermCriteria(TermCriteria::COUNT | TermCriteria::EPS, 30, 1e-6); // 终⽌条件
stereoCalibrate(objectPoints, imagePoints1, imagePoints2, cameraMatrix1, distCoeffs1,
cameraMatrix2, distCoeffs2, imageSize, R, T, E, F, CALIB_FIX_INTRINSIC, criteria); // 注意参数顺序,可以到保存的⽂件中查看,避免返回时出错
stereoStore << "左相机内参数:" << endl;
stereoStore << cameraMatrix1 << endl;
stereoStore << "右相机内参数:" << endl;
stereoStore << cameraMatrix2 << endl;
stereoStore << "左相机畸变系数:" << endl;
stereoStore << distCoeffs1 << endl;
stereoStore << "右相机畸变系数:" << endl;
stereoStore << distCoeffs2 << endl;
stereoStore << "旋转矩阵:" << endl;
stereoStore << R << endl;
stereoStore << "平移向量:" << endl;
stereoStore << T << endl;
stereoStore << "本征矩阵:" << endl;
stereoStore << E << endl;
stereoStore << "基础矩阵:" << endl;
stereoStore << F << endl;
stereoStore.close();
return true;
}
结果:
立体校正
版权声明:本站内容均来自互联网,仅供演示用,请勿用于商业和其他非法用途。如果侵犯了您的权益请与我们联系QQ:729038198,我们将在24小时内删除。
发表评论