// Author: wakasann
// Date: 1/3/2020 - 4:17 AM
//
// OpenCV 3 — Chapter 5.1

#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>

using namespace std;
using namespace cv;

// 5.1 Accessing pixels in an image

// Forward declarations of the three color-reduction variants.
// Each quantizes every channel value to a multiple of `div` (plus div/2).

// Method 1: pointer access via Mat::ptr and the C [] operator.
void colorReduce(Mat& inputImage,Mat& outputImage,int div);
// Method 2: Mat_<Vec3b> iterator traversal.
void colorReduce2(Mat& inputImage, Mat& outputImage, int div);
// Method 3: dynamic address computation with Mat::at.
void colorReduce3(Mat& inputImage, Mat& outputImage, int div);

// ROI demo: overlays a logo onto the source image. Returns false on load failure.
bool ROI_AddImage();

int main()
{
	//Mat src = imread("C:/Users/Administrator/Desktop/test.jpg");
	// The image must be placed in the same directory as the *.vcxproj file.
	Mat src = imread("images/test.jpg");
	// Guard against a failed load: imshow/clone on an empty Mat would crash.
	if (!src.data)
	{
		cout << "读取原图片错误!" << endl;
		return -1;
	}
	imshow("原始图片", src);

	// Destination images: same size and type as the source.
	Mat dstImage1;
	dstImage1.create(src.rows, src.cols, src.type());

	Mat dstImage2;
	dstImage2.create(src.rows, src.cols, src.type());

	Mat dstImage3;
	dstImage3.create(src.rows, src.cols, src.type());

	// Time method 1 (pointer access) with the tick counter.
	double time0 = static_cast<double>(getTickCount());
	colorReduce(src,dstImage1,32);

	// Compute elapsed time in seconds and report it.
	time0 = ((double)getTickCount() - time0) / getTickFrequency();

	cout << "此方法使用了" << time0 << "秒" << endl;

	imshow("方法一图片",dstImage1);


	// Time method 2 (iterator access).
	double time1 = static_cast<double>(getTickCount());
	colorReduce2(src, dstImage2, 32);

	// Compute elapsed time in seconds and report it.
	time1 = ((double)getTickCount() - time1) / getTickFrequency();

	cout << "此方法二使用了" << time1 << "秒" << endl;

	imshow("方法二图片", dstImage2);

	// Time method 3 (dynamic address computation).
	double time2 = static_cast<double>(getTickCount());
	colorReduce3(src, dstImage3, 32);

	// Compute elapsed time in seconds and report it.
	time2 = ((double)getTickCount() - time2) / getTickFrequency();

	cout << "此方法三使用了" << time2 << "秒" << endl;

	imshow("方法三图片", dstImage3);

	// ROI overlay demo.
	ROI_AddImage();

	waitKey(0);
	return 0;
}

// Method 1: pointer access. Quantizes every channel value of every pixel
// to the center of its `div`-wide bucket: v -> v/div*div + div/2.
// Works for any number of channels because it walks raw bytes row by row.
void colorReduce(Mat& inputImage, Mat& outputImage, int div)
{
	outputImage = inputImage.clone();
	const int rows = outputImage.rows;
	// Elements per row = columns * channels (one uchar per channel).
	const int elemsPerRow = outputImage.cols * outputImage.channels();

	for (int r = 0; r < rows; ++r)
	{
		// Raw pointer to the start of row r; valid even for padded rows.
		uchar* rowPtr = outputImage.ptr<uchar>(r);
		for (int c = 0; c < elemsPerRow; ++c)
			rowPtr[c] = rowPtr[c] / div * div + div / 2;
	}
}

// Method 2: iterator access. Same quantization as colorReduce, but walks
// the image with a Mat_<Vec3b> iterator; assumes a 3-channel 8-bit image
// (each element is a Vec3b: three unsigned chars in BGR order).
void colorReduce2(Mat& inputImage, Mat& outputImage, int div)
{
	outputImage = inputImage.clone();
	// Iterate from the first pixel to one past the last.
	for (Mat_<Vec3b>::iterator pixel = outputImage.begin<Vec3b>(),
	                           last = outputImage.end<Vec3b>();
	     pixel != last; ++pixel)
	{
		// Quantize each of the three channels in place.
		for (int ch = 0; ch < 3; ++ch)
			(*pixel)[ch] = (*pixel)[ch] / div * div + div / 2;
	}
}

// Method 3: dynamic address computation with Mat::at. Same quantization as
// the other two methods; assumes a 3-channel 8-bit image (CV_8UC3).
void colorReduce3(Mat& inputImage, Mat& outputImage, int div)
{
	outputImage = inputImage.clone();
	int rowNumber = outputImage.rows; // row count
	int colNumber = outputImage.cols; // column count

	// For a color image each pixel has 3 parts: blue, green and red (BGR).
	// Mat::at on such an image returns a vector of three 8-bit values,
	// which OpenCV defines as Vec3b (three unsigned chars).
	for (int i = 0; i < rowNumber; i++)
	{
		for (int j = 0; j < colNumber; j++)
		{
			// Resolve the pixel address once per pixel instead of six
			// separate at<Vec3b>(i, j) lookups — same result, less work.
			Vec3b& pixel = outputImage.at<Vec3b>(i, j);
			pixel[0] = pixel[0] / div * div + div / 2;
			pixel[1] = pixel[1] / div * div + div / 2;
			pixel[2] = pixel[2] / div * div + div / 2;
		}
	}
}

// ROI demo: copies the logo into the top-left corner of the source image
// (masked by the logo's grayscale version), then blends the logo over that
// region with addWeighted, and displays the result.
// Returns false if any image fails to load or the logo does not fit.
bool ROI_AddImage()
{
	Mat srcImage = imread("images/test.jpg");

	Mat logoImage = imread("images/logo.jpg");

	if (!srcImage.data)
	{
		printf("读取原图片错误! \n");
		return false;
	}
	if (!logoImage.data)
	{
		printf("读取logo图片错误! \n");
		return false;
	}
	// The ROI Rect below starts at (0,0); the logo must fit inside the
	// source image or srcImage(Rect(...)) would throw.
	if (logoImage.cols > srcImage.cols || logoImage.rows > srcImage.rows)
	{
		printf("logo image is larger than the source image! \n");
		return false;
	}
	// Define a Mat viewing the ROI region of the source image (shares data).
	Mat imageRoi = srcImage(Rect(0, 0, logoImage.cols, logoImage.rows));

	// Grayscale logo used as the copy mask (IMREAD_GRAYSCALE == 0).
	Mat mask = imread("images/logo.jpg", IMREAD_GRAYSCALE);
	if (!mask.data)
	{
		printf("读取logo图片错误! \n");
		return false;
	}

	// Copy logo pixels into the ROI wherever the mask is non-zero.
	logoImage.copyTo(imageRoi, mask);

	// NOTE(review): this blend overwrites the masked copy above with
	// 0.5*roi + 0.3*logo — kept as-is to preserve the demo's output.
	addWeighted(imageRoi, 0.5, logoImage, 0.3, 0.0, imageRoi);

	namedWindow("利用ROI实现图像叠加(一)");

	imshow("利用ROI实现图像叠加(一)",srcImage);
	return true;
}