我目前正在尝试实现高斯模糊,并没有发生模糊效果,而是增强了图像对比度。
#include <algorithm>
#include <math.h>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
void displayImage(Mat &img, unsigned int time = 0, string title = "frame") {
imshow(title, img);
waitKey(time);
}
bool isValidPoint(Mat &img, int x, int y) {
int rows = img.rows;
int cols = img.cols;
return (x >= 0 and x < rows and y >= 0 and y < cols);
}
static double square(double x) { return x * x; }
static double computeGaussianFunc(double x, double y, double mu, double sigma) {
double val =
exp(-0.5 * ((square((x - mu) / sigma)) + square((y - mu) / sigma))) /
(2 * M_PI * sigma * sigma);
return val;
}
double norm2(double val,double minVal,double maxVal){
double range = (maxVal-minVal);
double newVal = (val-minVal)/range;
return (int(255*newVal));
}
// oldMin, oldMax, newMin, newMax, oldVal
double getNormalizedValue(double oldMin, double oldMax, double newMin,
double newMax, double oldVal) {
double oldRange = (oldMax - oldMin);
double newRange = (newMax - newMin);
double newVal = (newMin + ((newRange * (oldVal - oldMin)) / (oldRange)));
return newVal;
}
static vector<vector<double>> getGuassianKernal(double sigma) {
int size = 2 * ceil(3 * sigma) + 1;
vector<vector<double>> Kernal(size, vector<double>(size, 0.0));
double sum = 0.0;
int center = size / 2;
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
int x = i - center;
int y = j - center;
Kernal[i][j] = computeGaussianFunc(i, j, size / 2, sigma);
sum += Kernal[i][j];
}
}
if (sum != 0) {
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
Kernal[i][j] /= sum;
}
}
}
return Kernal;
}
static Mat guassianFilterTransformExp(Mat &img, double Sigma) {
vector<vector<double>> filter = getGuassianKernal(Sigma);
int FiltSize = filter.size();
int trows = img.rows - FiltSize + 1;
int tcols = img.cols - FiltSize + 1;
// Final output
Mat transformed(trows, tcols, CV_8U);
// intermidiate matrix using for normalizing.
Mat inter(trows, tcols, CV_64F);
// min and max values of value after convolving with filter for normalization
double minVal = 10000.0;
double maxVal = 0.0;
for (int i = 1; i <= trows - 2; i++) {
for (int j = 1; j <= tcols - 2; j++) {
double tval = 0;
for (int x = -1; x <= 1; x++) {
for (int y = -1; y <= 1; y++) {
tval = tval + (filter[x + 1][y + 1] *
static_cast<double>(img.at<u_char>(i + x, j + y)));
minVal = min(minVal,tval);
maxVal = max(maxVal,tval);
}
}
inter.at<double>(i,j) = tval;
}
}
for (int i = 0; i < trows; i++) {
for (int j = 0; j <tcols; j++) {
double val = inter.at<double>(i,j);
double newVal = norm2(val,minVal,maxVal);
cout<<"Computed Val : "<<val<<" Src : "<<static_cast<double>(img.at<u_char>(i, j))<<" Normalized : "<<newVal<<'\n';
transformed.at<u_char>(i,j) = static_cast<u_char>(min(255.0,newVal));
}
}
return (transformed);
}
int main() {
string imPath =
"/home/panirpal/workspace/Projects/ComputerVision/data/images/chess2.jpg";
Mat img = imread(imPath, IMREAD_GRAYSCALE);
if (!img.empty()) {
displayImage(img);
Mat out = guassianFilterTransformExp(img,1.3);
displayImage(out);
} else
cerr << "image not found! exiting...";
return 0;
}
在调试时我得到了多个观察结果。首先,我根据互联网上找到的 sigma
2 * ceil(3 * sigma) + 1;
计算内核大小。因此,内核大小变为 9(给定的 sigma = 1.3 , 2*(ceil(3*1.3) + 1 )。当滤波器与图像补丁进行卷积/交叉相关操作时,内核值变得非常小,结果值(在代码中表示为 tval
)变得相当小,我想这不会令人惊讶。然后我尝试将这个 tval
标准化为 0-255 范围,因为按原样渲染 tval
导致图像几乎全黑,我使用了here提到的标准化技术之一,下面是我收集的一些日志,以了解正在发生的情况,计算的 val 代表 tval
Src 代表原始(源图像中的灰度像素值)和归一化代表代码中的newVal
。奇怪的是归一化值高于源图像,它们应该更低并平滑。输出也反映了相同的情况。
我在这里做错了什么?
Computed Val : 2.25082 Src : 167 Normalized : 177
Computed Val : 2.21529 Src : 161 Normalized : 174
Computed Val : 2.27372 Src : 159 Normalized : 179
Computed Val : 2.36755 Src : 160 Normalized : 186
Computed Val : 2.39062 Src : 177 Normalized : 188
Computed Val : 2.32093 Src : 174 Normalized : 182
Computed Val : 1.25675 Src : 168 Normalized : 99
Computed Val : 0.495057 Src : 91 Normalized : 39
Computed Val : 1.14986 Src : 39 Normalized : 90
Computed Val : 2.07179 Src : 64 Normalized : 163
Computed Val : 2.34365 Src : 132 Normalized : 184
Computed Val : 2.39052 Src : 172 Normalized : 188
输入图像(名为chess2.jpg):
输出:
注意:确保 OpenCV 已构建在您的计算机上。可以在 OpenCV 安装指南中找到。
用于构建代码的命令:
g++ exp.cpp -I/usr/local/include/opencv4 -Wl,-rpath,/usr/local/lib /usr/local/lib/libopencv_highgui.so.4.8.0 /usr/local/lib/libopencv_ml.so.4.8.0 /usr/local/lib/libopencv_objdetect.so.4.8.0 /usr/local/lib/libopencv_photo.so.4.8.0 /usr/local/lib/libopencv_stitching.so.4.8.0 /usr/local/lib/libopencv_video.so.4.8.0 /usr/local/lib/libopencv_videoio.so.4.8.0 /usr/local/lib/libopencv_imgcodecs.so.4.8.0 /usr/local/lib/libopencv_calib3d.so.4.8.0 /usr/local/lib/libopencv_dnn.so.4.8.0 /usr/local/lib/libopencv_features2d.so.4.8.0 /usr/local/lib/libopencv_flann.so.4.8.0 /usr/local/lib/libopencv_imgproc.so.4.8.0 /usr/local/lib/libopencv_core.so.4.8.0 -lm -o exp
预期输出为模糊图像。
在高斯滤波器函数中,您创建一个正确大小的高斯核,读取这些大小并根据它调整图像上的循环,但内部循环是:
for (int x = -1; x <= 1; x++) {
for (int y = -1; y <= 1; y++) {
仅迭代内核左上角的 3x3 区域。
我不喜欢过滤器函数返回比输入时更小的图像,因为这使得很难将过滤器结果与原始图像结合起来。但考虑到您确实返回了较小的图像,您可以通过不用担心内核中心在哪里来简化代码:
static Mat guassianFilterTransformExp(Mat &img, double Sigma) {
vector<vector<double>> filter = getGuassianKernal(Sigma);
int FiltSize = filter.size();
int trows = img.rows - FiltSize + 1;
int tcols = img.cols - FiltSize + 1;
// intermidiate matrix using for normalizing.
Mat transformed(trows, tcols, CV_8U);
for (int i = 0; i < trows; i++) {
for (int j = 0; j < tcols; j++) {
double tval = 0;
for (int x = 0; x < FiltSize; x++) {
for (int y = 0; y < FiltSize; y++) {
tval = tval + (filter[x][y] *
static_cast<double>(img.at<u_char>(i + x, j + y)));
}
}
tval = std::min(tval, 255.0);
teal = std::max(tval, 0.0);
transformed.at<u_char>(i,j) = static_cast<u_char>(tval);
}
}
return (transformed);
}