#include <algorithm>
#include <cstdio>
#include <fstream>
#include <iostream>
#include <string>

#include "opencv2/opencv.hpp"
using namespace std; using namespace cv;
inline float calcAvg(const Mat& src, const Rect&rect) { Mat image = src.clone(); if (image.channels() == 3) { cvtColor(image, image, COLOR_BGR2GRAY); }
if ((rect & Rect(0, 0, image.cols, image.rows)) == rect) { Mat patch = image(rect); patch.convertTo(patch, CV_32F); float avg_value = 0.0; for (size_t i = 0; i < patch.rows; i++) { float *data = patch.ptr<float>(i); for (size_t j = 0; j < patch.cols; j++) { avg_value += (*data++); } } return avg_value*1.0 / (patch.rows*patch.cols); } else { return 0.0; } }
void main() { VideoCapture cap(0); Mat src; while (cap.read(src)) { Mat ori = src.clone(); Mat image = src.clone(); Mat dst; Vec3b *pDataMat; int pixMax = 0, pixMin = 255; int targetpixMax = 200, targetpixMin = 100;
double t1 = getTickCount(); for (int i = 0; i < image.rows; i++) { pDataMat = image.ptr<Vec3b>(i); for (int j = 0; j < image.cols; j++) { for (int k = 0; k < image.channels(); k++) { if (pDataMat[j][k] > pixMax) pixMax = pDataMat[j][k]; if ((int)pDataMat[j][k] < pixMin) pixMin = (int)pDataMat[j][k]; } } } for (int i = 0; i < image.rows; i++) { pDataMat = image.ptr<Vec3b>(i); for (int j = 0; j < image.cols; j++) { for (int k = 0; k < image.channels(); k++) { pDataMat[j][k] = (pDataMat[j][k] - pixMin) * (targetpixMax-targetpixMin) / (pixMax - pixMin)+targetpixMin; } } } double t2 = getTickCount(); putText(image, "time:" + to_string( (t2 - t1)/ getTickFrequency()*1000), Point(20, 30), 1, 2, Scalar(0, 255, 0), 2); imshow("基于给定范围的图像拉伸(指针手写)", image);
Mat dstImage; double t3 = getTickCount(); ori.convertTo(dstImage, -1, 100.0/255, 100); double t4 = getTickCount(); putText(dstImage, "time:" + to_string((t4 - t3) / getTickFrequency() * 1000), Point(20, 30), 1, 2, Scalar(0, 255, 0), 2); imshow("基于给定范围的图像拉伸(opencv自带)", dstImage);
double t5 = getTickCount(); float grayScale = calcAvg(src, Rect(0, 0, src.cols, src.rows)); double t6 = getTickCount(); printf("calcAvg time:%.2f\n " ,(t6 - t5) / getTickFrequency() * 1000);
double t7 = getTickCount(); Scalar mean_val = cv::mean(src); float fmean = (mean_val[0] + mean_val[1] + mean_val[2])/3.0; double t8 = getTickCount(); printf( "mean time:%.2f\n" , (t8 - t7) / getTickFrequency() * 1000);
float avePix = 100; float delta = 3; Mat temp = src.reshape(1); double minVal, maxVal; minMaxIdx(temp, &minVal, &maxVal, NULL, NULL); if ((fmean<avePix- delta) || (fmean>avePix+ delta)) { src.convertTo(src, CV_32F); src = src + Scalar::all(avePix) - mean_val; float alpha1 = (avePix - 0) / (fmean - minVal); float alpha2 = (255 - avePix) / (maxVal - fmean); float alpha = (alpha1 < alpha2) ? alpha1 : alpha2; dst = Scalar::all(avePix) - alpha*(Scalar::all(avePix) - src); dst.convertTo(dst, CV_8U); imshow("基于均值的线性拉伸", dst); } cout << "ave:" << mean(dst) << endl<<endl; imshow("原始图像", ori); waitKey(10); } }