OpenCV校准双目相机并测量距离

发布于:2025-08-07 ⋅ 阅读:(18) ⋅ 点赞:(0)

0、简介

利用OpenCV 自带的双目立体校准程序,求取相机的内参、外参后,对图像进行校正;再利用OpenCV 自带的SGBM算法求取视差图,实现框选画面中任意位置、计算该位置距离相机的距离。

1、采集左右图像

采集左相机图像
在这里插入图片描述
对应的右边相机图像
在这里插入图片描述
xml 文件保存图像对,依次为左图、右图、左图、右图。。。。

<?xml version="1.0"?>
<opencv_storage>
<imagelist>
	"stereo/L/IMG_0001.jpg"
	"stereo/R/IMG_0001.jpg"
	"stereo/L/IMG_0002.jpg"
	"stereo/R/IMG_0002.jpg"
	"stereo/L/IMG_0003.jpg"
	"stereo/R/IMG_0003.jpg"
	"stereo/L/IMG_0004.jpg"
	"stereo/R/IMG_0004.jpg"
	"stereo/L/IMG_0005.jpg"
	"stereo/R/IMG_0005.jpg"
	"stereo/L/IMG_0006.jpg"
	"stereo/R/IMG_0006.jpg"
	"stereo/L/IMG_0007.jpg"
	"stereo/R/IMG_0007.jpg"
	"stereo/L/IMG_0008.jpg"
	"stereo/R/IMG_0008.jpg"
	"stereo/L/IMG_0009.jpg"
	"stereo/R/IMG_0009.jpg"
	"stereo/L/IMG_0010.jpg"
	"stereo/R/IMG_0010.jpg"
	"stereo/L/IMG_0011.jpg"
	"stereo/R/IMG_0011.jpg"
	"stereo/L/IMG_0012.jpg"
	"stereo/R/IMG_0012.jpg"
	"stereo/L/IMG_0013.jpg"
	"stereo/R/IMG_0013.jpg"
	"stereo/L/IMG_0014.jpg"
	"stereo/R/IMG_0014.jpg"
	"stereo/L/IMG_0015.jpg"
	"stereo/R/IMG_0015.jpg"
	"stereo/L/IMG_0016.jpg"
	"stereo/R/IMG_0016.jpg"
	"stereo/L/IMG_0017.jpg"
	"stereo/R/IMG_0017.jpg"
	"stereo/L/IMG_0018.jpg"
	"stereo/R/IMG_0018.jpg"
	"stereo/L/IMG_0019.jpg"
	"stereo/R/IMG_0019.jpg"
	"stereo/L/IMG_0020.jpg"
	"stereo/R/IMG_0020.jpg"
</imagelist>
</opencv_storage>

2、求解内外参数

程序

/* This is sample from the OpenCV book. The copyright notice is below */

/* *************** License:**************************
   Oct. 3, 2008
   Right to use this code in any way you want without warranty, support or any guarantee of it working.

   BOOK: It would be nice if you cited it:
   Learning OpenCV: Computer Vision with the OpenCV Library
     by Gary Bradski and Adrian Kaehler
     Published by O'Reilly Media, October 3, 2008

   AVAILABLE AT:
     http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
     Or: http://oreilly.com/catalog/9780596516130/
     ISBN-10: 0596516134 or: ISBN-13: 978-0596516130

   OPENCV WEBSITES:
     Homepage:      http://opencv.org
     Online docs:   http://docs.opencv.org
     GitHub:        https://github.com/opencv/opencv/
   ************************************************** */

#include "opencv2/calib3d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"

#include <vector>
#include <string>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>

using namespace cv;
using namespace std;

// Print the program description (chessboard parameters, calibrated vs.
// uncalibrated rectification modes) followed by the command-line usage.
// Always returns 0 so callers can `return print_help(argv);`.
static int print_help(char** argv)
{
    std::cout
        << " Given a list of chessboard images, the number of corners (nx, ny)\n"
           " on the chessboards, and a flag: useCalibrated for \n"
           "   calibrated (0) or\n"
           "   uncalibrated \n"
           "     (1: use stereoCalibrate(), 2: compute fundamental\n"
           "         matrix separately) stereo. \n"
           " Calibrate the cameras and display the\n"
           " rectified results along with the computed disparity images.   \n"
        << std::endl;
    std::cout << "Usage:\n " << argv[0]
              << " -w=<board_width default=9> -h=<board_height default=6> -s=<square_size default=1.0> <image list XML/YML file default=stereo_calib.xml>\n"
              << std::endl;
    return 0;
}


/*
 * StereoCalib: full OpenCV stereo-calibration pipeline on a list of
 * chessboard image pairs.
 *
 * imagelist      - file names ordered L,R,L,R,... (must contain an even
 *                  number of entries).
 * boardSize      - number of INNER chessboard corners per row/column.
 * squareSize     - physical side length of one chessboard square; the
 *                  translation T is expressed in the same unit.
 * displayCorners - show each detected corner set in a window.
 * useCalibrated  - true: Bouguet rectification from the calibrated
 *                  extrinsics; false: Hartley rectification derived only
 *                  from the fundamental matrix.
 * showRectified  - display the rectified pairs side by side at the end.
 *
 * Side effects: writes intrinsics.yml (M1, D1, M2, D2) and
 * extrinsics.yml (R, T, R1, R2, P1, P2, Q) to the working directory.
 */
static void
StereoCalib(const vector<string>& imagelist, Size boardSize, float squareSize, bool displayCorners = false, bool useCalibrated = true, bool showRectified = true)
{
    if (imagelist.size() % 2 != 0)
    {
        cout << "Error: the image list contains odd (non-even) number of elements\n";
        return;
    }

    // Largest upscale factor tried when corner detection fails at the
    // original resolution.
    const int maxScale = 4;
    // ARRAY AND VECTOR STORAGE:

    vector<vector<Point2f> > imagePoints[2];
    vector<vector<Point3f> > objectPoints;
    Size imageSize;

    int i, j, k, nimages = (int)imagelist.size() / 2;

    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    vector<string> goodImageList;

    // Corner detection. i walks all candidate pairs, j counts the pairs
    // for which BOTH images yielded a full corner set.
    for (i = j = 0; i < nimages; i++)
    {
        for (k = 0; k < 2; k++)
        {
            const string& filename = imagelist[i * 2 + k];
            // Load as grayscale (flag 0).
            Mat img = imread(filename, 0);

            if (img.empty())
                break;
            if (imageSize == Size())
                imageSize = img.size();
            else if (img.size() != imageSize)
            {
                cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
                break;
            }
            bool found = false;
            vector<Point2f>& corners = imagePoints[k][j];
            // Retry detection at increasing magnification; small or blurry
            // boards are sometimes only found after upscaling.
            for (int scale = 1; scale <= maxScale; scale++)
            {
                Mat timg;
                if (scale == 1)
                    timg = img;
                else
                    resize(img, timg, Size(), scale, scale, INTER_LINEAR_EXACT);
                found = findChessboardCorners(timg, boardSize, corners,
                    CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
                if (found)
                {
                    if (scale > 1)
                    {
                        // Map corner coordinates back to the original
                        // (unscaled) image.
                        Mat cornersMat(corners);
                        cornersMat *= 1. / scale;
                    }
                    break;
                }
            }
            if (displayCorners)
            {
                cout << filename << endl;
                Mat cimg, cimg1;
                cvtColor(img, cimg, COLOR_GRAY2BGR);
                drawChessboardCorners(cimg, boardSize, corners, found);
                // Shrink for display so the longest side is ~640 px.
                double sf = 640. / MAX(img.rows, img.cols);
                resize(cimg, cimg1, Size(), sf, sf, INTER_LINEAR_EXACT);
                imshow("corners", cimg1);
                char c = (char)waitKey(500);
                if (c == 27 || c == 'q' || c == 'Q') //Allow ESC to quit
                    exit(-1);
            }
            else
                putchar('.');
            if (!found)
                break;
            // Refine corner locations to sub-pixel accuracy.
            cornerSubPix(img, corners, Size(11, 11), Size(-1, -1),
                TermCriteria(TermCriteria::COUNT + TermCriteria::EPS,
                    30, 0.01));
        }
        // k == 2 only when both images of the pair were processed fully.
        if (k == 2)
        {
            goodImageList.push_back(imagelist[i * 2]);
            goodImageList.push_back(imagelist[i * 2 + 1]);
            j++;
        }
    }
    cout << j << " pairs have been successfully detected.\n";
    nimages = j;
    if (nimages < 2)
    {
        cout << "Error: too little pairs to run the calibration\n";
        return;
    }

    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    objectPoints.resize(nimages);

    // Build the 3D reference points for every view: a planar grid at
    // z = 0 spaced by squareSize.
    for (i = 0; i < nimages; i++)
    {
        for (j = 0; j < boardSize.height; j++)
            for (k = 0; k < boardSize.width; k++)
                objectPoints[i].push_back(Point3f(k * squareSize, j * squareSize, 0));
    }

    cout << "Running stereo calibration ...\n";

    Mat cameraMatrix[2], distCoeffs[2];
    // Initial per-camera intrinsic guesses; refined jointly below
    // (CALIB_USE_INTRINSIC_GUESS).
    cameraMatrix[0] = initCameraMatrix2D(objectPoints, imagePoints[0], imageSize, 0);
    cameraMatrix[1] = initCameraMatrix2D(objectPoints, imagePoints[1], imageSize, 0);
    Mat R, T, E, F;

    double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
        cameraMatrix[0], distCoeffs[0],
        cameraMatrix[1], distCoeffs[1],
        imageSize, R, T, E, F,
        CALIB_FIX_ASPECT_RATIO +
        CALIB_ZERO_TANGENT_DIST +
        CALIB_USE_INTRINSIC_GUESS +
        CALIB_SAME_FOCAL_LENGTH +
        CALIB_RATIONAL_MODEL +
        CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5,
        TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, 1e-5));
    cout << "done with RMS error=" << rms << endl;

    // CALIBRATION QUALITY CHECK
    // because the output fundamental matrix implicitly
    // includes all the output information,
    // we can check the quality of calibration using the
    // epipolar geometry constraint: m2^t*F*m1=0
    double err = 0;
    int npoints = 0;
    vector<Vec3f> lines[2];
    for (i = 0; i < nimages; i++)
    {
        int npt = (int)imagePoints[0][i].size();
        Mat imgpt[2];
        for (k = 0; k < 2; k++)
        {
            imgpt[k] = Mat(imagePoints[k][i]);
            undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
            computeCorrespondEpilines(imgpt[k], k + 1, F, lines[k]);
        }
        // Sum, for each correspondence, the distance of each point to
        // the epipolar line induced by its partner in the other view.
        for (j = 0; j < npt; j++)
        {
            double errij = fabs(imagePoints[0][i][j].x * lines[1][j][0] +
                imagePoints[0][i][j].y * lines[1][j][1] + lines[1][j][2]) +
                fabs(imagePoints[1][i][j].x * lines[0][j][0] +
                    imagePoints[1][i][j].y * lines[0][j][1] + lines[0][j][2]);
            err += errij;
        }
        npoints += npt;
    }
    cout << "average epipolar err = " << err / npoints << endl;

    // save intrinsic parameters
    FileStorage fs("intrinsics.yml", FileStorage::WRITE);
    if (fs.isOpened())
    {
        fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
            "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
        fs.release();
    }
    else
        cout << "Error: can not save the intrinsic parameters\n";

    Mat R1, R2, P1, P2, Q;
    Rect validRoi[2];

    // Compute rectification transforms; validRoi marks the largest
    // all-valid-pixels rectangle in each rectified image.
    stereoRectify(cameraMatrix[0], distCoeffs[0],
        cameraMatrix[1], distCoeffs[1],
        imageSize, R, T, R1, R2, P1, P2, Q,
        CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);

    fs.open("extrinsics.yml", FileStorage::WRITE);
    if (fs.isOpened())
    {
        fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
        fs.release();
    }
    else
        cout << "Error: can not save the extrinsic parameters\n";

    // OpenCV can handle left-right
    // or up-down camera arrangements
    bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));

    // COMPUTE AND DISPLAY RECTIFICATION
    if (!showRectified)
        return;

    Mat rmap[2][2];
    // IF BY CALIBRATED (BOUGUET'S METHOD)
    if (useCalibrated)
    {
        // we already computed everything
    }
    // OR ELSE HARTLEY'S METHOD
    else
        // use intrinsic parameters of each camera, but
        // compute the rectification transformation directly
        // from the fundamental matrix
    {
        vector<Point2f> allimgpt[2];
        for (k = 0; k < 2; k++)
        {
            for (i = 0; i < nimages; i++)
                std::copy(imagePoints[k][i].begin(), imagePoints[k][i].end(), back_inserter(allimgpt[k]));
        }
        F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
        Mat H1, H2;
        stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3);

        R1 = cameraMatrix[0].inv() * H1 * cameraMatrix[0];
        R2 = cameraMatrix[1].inv() * H2 * cameraMatrix[1];
        P1 = cameraMatrix[0];
        P2 = cameraMatrix[1];
    }

    //Precompute maps for cv::remap()
    initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);

    // Canvas holds the two rectified views side by side (horizontal rig)
    // or stacked (vertical rig), scaled for display.
    Mat canvas;
    double sf;
    int w, h;
    if (!isVerticalStereo)
    {
        sf = 600. / MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width * sf);
        h = cvRound(imageSize.height * sf);
        canvas.create(h, w * 2, CV_8UC3);
    }
    else
    {
        sf = 300. / MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width * sf);
        h = cvRound(imageSize.height * sf);
        canvas.create(h * 2, w, CV_8UC3);
    }

    for (i = 0; i < nimages; i++)
    {
        for (k = 0; k < 2; k++)
        {
            Mat img = imread(goodImageList[i * 2 + k], 0), rimg, cimg;
            remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR);
            //imshow("0", img);
            //imshow("1", rimg);
            //waitKey(0);
            cvtColor(rimg, cimg, COLOR_GRAY2BGR);
            Mat canvasPart = !isVerticalStereo ? canvas(Rect(w * k, 0, w, h)) : canvas(Rect(0, h * k, w, h));
            resize(cimg, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);
            if (useCalibrated)
            {
                // Outline the valid rectified region in red.
                Rect vroi(cvRound(validRoi[k].x * sf), cvRound(validRoi[k].y * sf),
                    cvRound(validRoi[k].width * sf), cvRound(validRoi[k].height * sf));
                rectangle(canvasPart, vroi, Scalar(0, 0, 255), 3, 8);
            }
        }

        // Draw epipolar guide lines every 16 px: after rectification,
        // corresponding points lie on the same line.
        if (!isVerticalStereo)
            for (j = 0; j < canvas.rows; j += 16)
                line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
        else
            for (j = 0; j < canvas.cols; j += 16)
                line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
        imshow("rectified", canvas);
        char c = (char)waitKey();
        if (c == 27 || c == 'q' || c == 'Q')
            break;
    }
}


static bool readStringList(const string& filename, vector<string>& l)
{
    l.resize(0);
    FileStorage fs(filename, FileStorage::READ);
    if (!fs.isOpened())
        return false;
    FileNode n = fs.getFirstTopLevelNode();
    if (n.type() != FileNode::SEQ)
        return false;
    FileNodeIterator it = n.begin(), it_end = n.end();
    for (; it != it_end; ++it)
        l.push_back((string)*it);
    return true;
}

int main(int argc, char** argv)
{
    Size boardSize;
    string imagelistfn;
    bool showRectified;
    //cv::CommandLineParser parser(argc, argv, "{w|9|}{h|6|}{s|1.0|}{nr||}{help||}{@input|stereo_calib.xml|}");
    //if (parser.has("help"))
    //    return print_help(argv);
    showRectified = true;
    imagelistfn = "stereo_calib.xml";
    boardSize.width = 8;
    boardSize.height =5;
    float squareSize = 1;

    vector<string> imagelist;
    bool ok = readStringList(imagelistfn, imagelist);
    if (!ok || imagelist.empty())
    {
        cout << "can not open " << imagelistfn << " or the string list is empty" << endl;
        return print_help(argv);
    }

    StereoCalib(imagelist, boardSize, squareSize, true, true, showRectified);
    return 0;
}

只需根据所用标定板修改棋盘格的内角点数量和方格边长:

boardSize.width = 8;
boardSize.height =5;
float squareSize = 1;

3 、角点匹配

在这里插入图片描述

4、矫正结果可视化

在这里插入图片描述
可以看到共有20组图片,矫正的误差是0.5 pix,这个数字最好小于1。
在这里插入图片描述
我们可以看到左右图像中,同一位置在同一水平线上。

5、SGBM算法匹配

/*
 *  stereo_match.cpp
 *  calibration
 *
 *  Created by Victor  Eruhimov on 1/18/10.
 *  Copyright 2010 Argus Corp. All rights reserved.
 *
 */

#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core/utility.hpp"

#include <stdio.h>
#include <sstream>
#include <string>
#include <vector>
#include<algorithm>
#include<cstdlib>
#include<fstream>
#include<iostream>
#include<sstream>
#include<vector>
#include<chrono>
#include<iostream>
#include<io.h>


using namespace cv;
int get_file_names(const std::string jpg_path, std::vector<std::string>& jpg_names)
{
    intptr_t Handle;
    struct _finddata_t FileInfo;
    std::string p;
    std::string path = jpg_path;
    jpg_names.clear();
    if ((Handle = _findfirst(p.assign(path).append("\\*").c_str(), &FileInfo)) == -1)
        printf("----\n");
    else
    {
       // printf("---1--%s\n", FileInfo.name);
        while (_findnext(Handle, &FileInfo) == 0)
        {
            //printf("---2--%s\n", FileInfo.name);
            std::string name_tem = FileInfo.name;
            if (name_tem.length() > 4)
            {
                jpg_names.push_back(name_tem);
            }
        }
        _findclose(Handle);
    }
    return 0;
}

// Print the demo banner and the full command-line usage string for the
// stereo-matching program.
static void print_help(char** argv)
{
    const char* usage =
        "\nUsage: %s <left_image> <right_image> [--algorithm=bm|sgbm|hh|hh4|sgbm3way] [--blocksize=<block_size>]\n"
        "[--max-disparity=<max_disparity>] [--scale=scale_factor>] [-i=<intrinsic_filename>] [-e=<extrinsic_filename>]\n"
        "[--no-display] [--color] [-o=<disparity_image>] [-p=<point_cloud_file>]\n";
    printf("\nDemo stereo matching converting L and R images into disparity and point clouds\n");
    printf(usage, argv[0]);
}

static void saveXYZ(const char* filename, const Mat& mat)
{
    const double max_z = 1.0e4;
    FILE* fp = fopen(filename, "wt");
    for (int y = 0; y < mat.rows; y++)
    {
        for (int x = 0; x < mat.cols; x++)
        {
            Vec3f point = mat.at<Vec3f>(y, x);
            if (fabs(point[2] - max_z) < FLT_EPSILON || fabs(point[2]) > max_z) continue;
            fprintf(fp, "%f %f %f\n", point[0], point[1], point[2]);
        }
    }
    fclose(fp);
}

int main_(int argc, char** argv)
{
    std::string img1_filename = "";
    std::string img2_filename = "";
    std::string img3_filename = "";
    std::string intrinsic_filename = "";
    std::string extrinsic_filename = "";
    std::string disparity_filename = "";
    std::string point_cloud_filename = "";

    enum { STEREO_BM = 0, STEREO_SGBM = 1, STEREO_HH = 2, STEREO_VAR = 3, STEREO_3WAY = 4, STEREO_HH4 = 5 };
    int alg = STEREO_SGBM;
    int SADWindowSize, numberOfDisparities;
    bool no_display;
    bool color_display;
    float scale;

    Ptr<StereoBM> bm = StereoBM::create(16, 9);
    Ptr<StereoSGBM> sgbm = StereoSGBM::create(0, 16, 3);
    //cv::CommandLineParser parser(argc, argv,
    //    "{@arg1||}{@arg2||}{help h||}{algorithm||}{max-disparity|0|}{blocksize|0|}{no-display||}{color||}{scale|1|}{i||}{e||}{o||}{p||}");
    //if (parser.has("help"))
    //{
    //    print_help(argv);
    //    return 0;
    //}
    img1_filename = "Left_0001_20250323_084427.JPG";
    img2_filename = "Right_0001_20250323_084427.JPG";
    img3_filename = "";

    //if (parser.has("algorithm"))
    //{
    //    std::string _alg = parser.get<std::string>("algorithm");
    //    alg = _alg == "bm" ? STEREO_BM :
    //        _alg == "sgbm" ? STEREO_SGBM :
    //        _alg == "hh" ? STEREO_HH :
    //        _alg == "var" ? STEREO_VAR :
    //        _alg == "hh4" ? STEREO_HH4 :
    //        _alg == "sgbm3way" ? STEREO_3WAY : -1;
    //}
    alg = STEREO_SGBM;
    numberOfDisparities = 96;
    SADWindowSize = 5;
    scale = 0.3;
    no_display = false;
    color_display = true;
    extrinsic_filename = "extrinsics.yml";
    intrinsic_filename = "intrinsics.yml";
    disparity_filename = "disparity_filename.png";
    point_cloud_filename = "point_cloud_filename.png";

    //if (alg < 0)
    //{
    //    printf("Command-line parameter error: Unknown stereo algorithm\n\n");
    //    print_help(argv);
    //    return -1;
    //}
    if (numberOfDisparities < 1 || numberOfDisparities % 16 != 0)
    {
        printf("Command-line parameter error: The max disparity (--maxdisparity=<...>) must be a positive integer divisible by 16\n");
        print_help(argv);
        return -1;
    }
    if (scale < 0)
    {
        printf("Command-line parameter error: The scale factor (--scale=<...>) must be a positive floating-point number\n");
        return -1;
    }
    if (SADWindowSize < 1 || SADWindowSize % 2 != 1)
    {
        printf("Command-line parameter error: The block size (--blocksize=<...>) must be a positive odd number\n");
        return -1;
    }
    if (img1_filename.empty() || img2_filename.empty())
    {
        printf("Command-line parameter error: both left and right images must be specified\n");
        return -1;
    }
    if ((!intrinsic_filename.empty()) ^ (!extrinsic_filename.empty()))
    {
        printf("Command-line parameter error: either both intrinsic and extrinsic parameters must be specified, or none of them (when the stereo pair is already rectified)\n");
        return -1;
    }

    if (extrinsic_filename.empty() && !point_cloud_filename.empty())
    {
        printf("Command-line parameter error: extrinsic and intrinsic parameters must be specified to compute the point cloud\n");
        return -1;
    }

    int color_mode = alg == STEREO_BM ? 0 : -1;
    std::string img_path = "C:/Users/11364/source/repos/mine/test_opencv3/test_opencv/MOV2/";
    std::vector<std::string> jpg_names;
    std::cout << "Read  path: " << img_path << std::endl;
    int i = 0;
    get_file_names(img_path, jpg_names);
  
    for (std::string jpg_name : jpg_names)
    {
      
        std::string right_name;
        std::string dis_name;
        std::cout <<"All jpg names:" << jpg_name << std::endl;
        std::cout << "jpg_name find left:" << jpg_name.find("left") << std::endl;
        if (jpg_name.find("l") ==9)
        {
            std::cout << "Left========:" << jpg_name << std::endl;
            std::string left_= jpg_name;
            std::string left_2 = jpg_name;
            right_name = left_.replace(left_.begin()+9, left_.end(), "right.jpg");
            std::cout << "Right=====:" << right_name << std::endl;
            dis_name = left_.replace(left_.begin() + 9, left_.end(), "dis.jpg");
            std::cout << "dis_name====:" << dis_name << std::endl;
        }
        else
        {
            continue;
        }
        img1_filename = img_path + jpg_name;
        img2_filename = img_path + right_name;
        img3_filename = img_path + dis_name;

        Mat img1 = imread(img1_filename, color_mode);
        Mat img2 = imread(img2_filename, color_mode);
        //std::cout << "img1_filename:" << img1_filename << std::endl;
        //std::cout << "img2_filename:" << img2_filename << std::endl;


        if (img1.empty())
        {
            printf("Command-line parameter error: could not load the first input image file\n");
            return -1;
        }
        if (img2.empty())
        {
            printf("Command-line parameter error: could not load the second input image file\n");
            return -1;
        }

        if (scale != 1.f)
        {
            Mat temp1, temp2;
            int method = scale < 1 ? INTER_AREA : INTER_CUBIC;
            resize(img1, temp1, Size(), scale, scale, method);
            img1 = temp1;
            resize(img2, temp2, Size(), scale, scale, method);
            img2 = temp2;


        }

        //Mat enhanced, filtered, gradient;

        // 1. CLAHE增强对比度
        //Ptr<CLAHE> clahe = createCLAHE(3.0, Size(8, 8));
        //clahe->apply(img1, img1);
        //clahe->apply(img2, img2);

        // 2. 导向滤波降噪
        //Ptr<ximgproc::GuidedFilter> guidedFilter = ximgproc::createGuidedFilter(enhanced, 5);
        ////guidedFilter->filter(enhanced, filtered);

        // 3. 计算梯度
        //Sobel(img1, img1, CV_8UC1, 1, 1);
        //Sobel(img2, img2, CV_8UC1, 1, 1);
        normalize(img1, img1, 0, 255, NORM_MINMAX, CV_8UC1);
        normalize(img2, img2, 0, 255, NORM_MINMAX, CV_8UC1);




        Size img_size = img1.size();

        Rect roi1, roi2;
        Mat Q;

        if (!intrinsic_filename.empty())
        {
            // reading intrinsic parameters
            FileStorage fs(intrinsic_filename, FileStorage::READ);
            if (!fs.isOpened())
            {
                printf("Failed to open file %s\n", intrinsic_filename.c_str());
                return -1;
            }

            Mat M1, D1, M2, D2;
            fs["M1"] >> M1;
            fs["D1"] >> D1;
            fs["M2"] >> M2;
            fs["D2"] >> D2;

            M1 *= scale;
            M2 *= scale;

            fs.open(extrinsic_filename, FileStorage::READ);
            if (!fs.isOpened())
            {
                printf("Failed to open file %s\n", extrinsic_filename.c_str());
                return -1;
            }

            Mat R, T, R1, P1, R2, P2;
            fs["R"] >> R;
            fs["T"] >> T;

            cv::stereoRectify(M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2);

            Mat map11, map12, map21, map22;
            cv::initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
            cv::initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);

            Mat img1r, img2r;
            cv::remap(img1, img1r, map11, map12, INTER_LINEAR);
            cv::remap(img2, img2r, map21, map22, INTER_LINEAR);

            img1 = img1r;
            img2 = img2r;
        }

        numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : ((img_size.width / 8) + 15) & -16;

        bm->setROI1(roi1);
        bm->setROI2(roi2);
        bm->setPreFilterCap(31);
        bm->setBlockSize(SADWindowSize > 0 ? SADWindowSize : 15);
        bm->setMinDisparity(0);
        bm->setNumDisparities(numberOfDisparities);
        bm->setTextureThreshold(10);
        bm->setUniquenessRatio(15);
        bm->setSpeckleWindowSize(100);
        bm->setSpeckleRange(32);
        bm->setDisp12MaxDiff(1);

        sgbm->setPreFilterCap(63);
        int sgbmWinSize = SADWindowSize > 0 ? SADWindowSize : 3;
        sgbm->setBlockSize(sgbmWinSize);

        int cn = img1.channels();

        sgbm->setP1(8 * cn * sgbmWinSize * sgbmWinSize);
        sgbm->setP2(32 * cn * sgbmWinSize * sgbmWinSize);
        sgbm->setMinDisparity(0);
        sgbm->setNumDisparities(numberOfDisparities);
        sgbm->setUniquenessRatio(10);
        sgbm->setSpeckleWindowSize(100);
        sgbm->setSpeckleRange(32);
        sgbm->setDisp12MaxDiff(1);
        if (alg == STEREO_HH)
            sgbm->setMode(StereoSGBM::MODE_HH);
        else if (alg == STEREO_SGBM)
            sgbm->setMode(StereoSGBM::MODE_SGBM);
        else if (alg == STEREO_HH4)
            sgbm->setMode(StereoSGBM::MODE_HH4);
        else if (alg == STEREO_3WAY)
            sgbm->setMode(StereoSGBM::MODE_SGBM_3WAY);

        Mat disp, disp8;


        int64 t = getTickCount();
        float disparity_multiplier = 1.0f;
        if (alg == STEREO_BM)
        {
            bm->compute(img1, img2, disp);
            if (disp.type() == CV_16S)
                disparity_multiplier = 16.0f;
        }
        else if (alg == STEREO_SGBM || alg == STEREO_HH || alg == STEREO_HH4 || alg == STEREO_3WAY)
        {
            sgbm->compute(img1, img2, disp);
            if (disp.type() == CV_16S)
                disparity_multiplier = 16.0f;
        }
        t = getTickCount() - t;
        printf("Time elapsed: %fms\n", t * 1000 / getTickFrequency());

        //disp = dispp.colRange(numberOfDisparities, img1p.cols);
        if (alg != STEREO_VAR)
            disp.convertTo(disp8, CV_8U, 255 / (numberOfDisparities * 16.));
        else
            disp.convertTo(disp8, CV_8U);

        Mat disp8_3c;
        if (color_display)
            cv::applyColorMap(disp8, disp8_3c, COLORMAP_TURBO);

        if (!disparity_filename.empty())
            imwrite(disparity_filename, color_display ? disp8_3c : disp8);

        if (!point_cloud_filename.empty())
        {
            printf("storing the point cloud...");
            fflush(stdout);
            Mat xyz;
            Mat floatDisp;
            //disp.convertTo(floatDisp, CV_32F, 1.0f / disparity_multiplier);
            reprojectImageTo3D(floatDisp, xyz, Q, true);
            saveXYZ(point_cloud_filename.c_str(), xyz);
            printf("\n");
        }

        if (!no_display)
        {
            std::ostringstream oss;
            oss << "disparity  " << (alg == STEREO_BM ? "bm" :
                alg == STEREO_SGBM ? "sgbm" :
                alg == STEREO_HH ? "hh" :
                alg == STEREO_VAR ? "var" :
                alg == STEREO_HH4 ? "hh4" :
                alg == STEREO_3WAY ? "sgbm3way" : "");
            oss << "  blocksize:" << (alg == STEREO_BM ? SADWindowSize : sgbmWinSize);
            oss << "  max-disparity:" << numberOfDisparities;
            std::string disp_name = oss.str();

            namedWindow("left", cv::WINDOW_NORMAL);
            imshow("left", img1);
			imwrite("Rec_L.png", img1);
            namedWindow("right", cv::WINDOW_NORMAL);
            imshow("right", img2);
            imwrite("Rec_R.png", img2);
            namedWindow(disp_name, cv::WINDOW_NORMAL);
            imshow(disp_name, color_display ? disp8_3c : disp8);
            imwrite(img3_filename, color_display ? disp8_3c : disp8);
            cv::waitKey(0);

            printf("press ESC key or CTRL+C to close...");
            fflush(stdout);
            printf("\n");

        }
    }


    return 0;
}

匹配结果
在这里插入图片描述

6、求取距离

基于5 中算出的视差图,根据公式 z = bf/d(b 是基线距离,f 是焦距,d 是视差,z 是相机距离物体的距离),即可算出距离。
在这里插入图片描述
代码如下

import cv2
import numpy as np
import time

# Read the rectified image pair saved by the C++ SGBM step.
imgR = cv2.imread('Rec_R.png')  # right image
imgL = cv2.imread('Rec_L.png')  # left image

if imgL is None or imgR is None:
    raise ValueError("无法读取图像,请检查文件路径!")

# Resize and convert to grayscale (SGBM is more stable on grayscale input).
imgL = cv2.resize(imgL, (640, 480))
imgR = cv2.resize(imgR, (640, 480))
grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)

# Camera parameters (example values — replace with your calibration results).
focal_length = 100  # focal length in pixels
baseline = 0.2  # baseline in meters (distance between the optical centers)

# SGBM parameter tuning
blockSize = 5
img_channels = 3
stereo = cv2.StereoSGBM_create(
    minDisparity=0,  # minimum disparity (usually 0)
    numDisparities=128,  # disparity range (must be a multiple of 16)
    blockSize=blockSize,
    P1=8 * img_channels * blockSize ** 2,  # smoothness penalty (small: keeps detail)
    P2=32 * img_channels * blockSize ** 2,  # smoothness penalty (P2 > P1; large: suppresses noise)
    disp12MaxDiff=1,  # left-right consistency check threshold
    preFilterCap=31,  # pre-filter clip value (15-63 recommended)
    uniquenessRatio=15,  # uniqueness check threshold (10-20, filters ambiguous matches)
    speckleWindowSize=100,  # speckle filter window (removes small noisy regions)
    speckleRange=16,  # allowed disparity variation inside a speckle (1-16)
    mode=cv2.STEREO_SGBM_MODE_HH  # high-accuracy (full dynamic programming) mode
)

# Time the disparity computation.
start_time = time.time()
disparity = stereo.compute(grayL, grayR)  # input order: left image, then right image
end_time = time.time()
print(f"视差计算耗时:{end_time - start_time:.2f}秒")

# Post-process: SGBM output is fixed-point, divide by 16 for true disparity.
disparity = disparity.astype(np.float32) / 16.0
disparity[disparity <= 0] = 0  # drop invalid disparities (<= 0)

# Visualizations: normalized grayscale and a color map for readability.
disp_gray = cv2.normalize(disparity, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
dis_color = cv2.applyColorMap(disp_gray, cv2.COLORMAP_JET)

# Depth from triangulation: depth = (focal length * baseline) / disparity.
depth = np.zeros_like(disparity)
valid = disparity > 0  # only compute depth where disparity is valid
depth[valid] = (focal_length * baseline) / disparity[valid]

# Mouse-interaction state.
drawing = False  # True while a rectangle is being dragged
ix, iy = -1, -1  # rectangle anchor corner
fx, fy = -1, -1  # rectangle final corner


# Mouse callback: drag a rectangle on the left image and report the
# average depth (in meters) of the selected region.
def mouse_callback(event, x, y, flags, param):
    global drawing, ix, iy, fx, fy, imgL_copy

    if event == cv2.EVENT_LBUTTONDOWN:
        # Start a new selection: remember the anchor corner and draw on
        # a fresh copy of the left image.
        drawing = True
        ix, iy = x, y
        imgL_copy = imgL.copy()
    elif event == cv2.EVENT_MOUSEMOVE and drawing:
        # Redraw the rubber-band rectangle while dragging.
        imgL_copy = imgL.copy()
        cv2.rectangle(imgL_copy, (ix, iy), (x, y), (0, 255, 0), 2)
        cv2.imshow('left', imgL_copy)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        fx, fy = x, y
        cv2.rectangle(imgL_copy, (ix, iy), (fx, fy), (0, 255, 0), 2)

        # Normalize the corners so (x1, y1) is top-left, (x2, y2) bottom-right.
        x1, x2 = sorted((ix, fx))
        y1, y2 = sorted((iy, fy))

        # Depth values inside the selection; keep only valid (> 0) pixels.
        region = depth[y1:y2, x1:x2]
        valid_region = region[region > 0]

        if valid_region.size > 0:
            # Mean distance over valid pixels, rounded to 2 decimals.
            avg_distance = round(np.mean(valid_region), 2)

            result_text = "d:"+ str(round(avg_distance,2))+"m"
            cv2.putText(imgL_copy, result_text, (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
            cv2.imshow('left', imgL_copy)

            print(f"选定区域: {x1},{y1}{x2},{y2}")
            print(f"平均距离: {avg_distance} 米")
            print(f"区域大小: {region.shape[1]}x{region.shape[0]} 像素")
            print(f"有效像素: {valid_region.size} / {region.size}")
        else:
            cv2.putText(imgL_copy, "无有效深度数据", (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.imshow('left', imgL_copy)
            print("选定区域无有效深度数据")


# Register the mouse handler on the left-image window.
cv2.namedWindow('left')
cv2.setMouseCallback('left', mouse_callback)

# Show the inputs and both disparity visualizations.
imgL_copy = imgL.copy()
cv2.imshow('left', imgL_copy)
cv2.imshow('right', imgR)
cv2.imshow('disparity', disp_gray)
cv2.imshow('disparity_color', dis_color)

# Poll for keyboard input until ESC is pressed.
while True:
    key = cv2.waitKey(1) & 0xFF
    if key == 27:  # ESC key
        break

cv2.destroyAllWindows()

7、b 站视频链接

https://www.bilibili.com/video/BV1BRgpzgEQK/

创作不易 需要指导 可私信或者加V


网站公告

今日签到

点亮在社区的每一天
去签到