#include "stdafx.h"
#include "BaseFunction.h"
// NOTE(review): the original #include targets in angle brackets were lost
// during extraction (empty "#include" directives). The headers below are
// reconstructed from what this file actually uses (cv::Mat/morphologyEx,
// cv::ximgproc::createAMFilter, std::vector, std::cout, std::min/max/find,
// sqrtf/powf) -- confirm against the build.
#include <cmath>
#include <vector>
#include <iostream>
#include <algorithm>
#include <opencv2/opencv.hpp>
#include <opencv2/ximgproc.hpp>
#include "OTSParticle.h"
#include "OTSImageProcessParam.h"
#include "OTSMorphology.h"

using namespace cv;
using namespace std;
using namespace OTSDATA;

/***** get the distance between two points *****/
float getDistance(Point pointO, Point pointA)
{
    // plain multiplication instead of powf(x, 2): cheaper and exact
    float dx = (float)(pointO.x - pointA.x);
    float dy = (float)(pointO.y - pointA.y);
    return sqrtf(dx * dx + dy * dy);
}

/***** get the distance between a point and a line *****/
// P is the point, A and B are two points defining the line
float getDist_P2L(Point pointP, Point pointA, Point pointB)
{
    // line equation Ax + By + C = 0 through pointA and pointB
    int A = pointA.y - pointB.y;
    int B = pointB.x - pointA.x;
    int C = pointA.x * pointB.y - pointA.y * pointB.x;
    // degenerate "line" (A == B): avoid division by zero, fall back to
    // plain point-to-point distance
    if (A == 0 && B == 0)
    {
        return getDistance(pointP, pointA);
    }
    // standard point-to-line distance formula
    return ((float)abs(A * pointP.x + B * pointP.y + C)) / sqrtf((float)(A * A + B * B));
}

// Sign of the cross product (P2 - P1) x (point - P1): tells on which side of
// the directed line P1->P2 the point lies (> 0 one side, < 0 the other,
// 0 exactly on the line).
int Side(Point P1, Point P2, Point point)
{
    return ((P2.y - P1.y) * point.x + (P1.x - P2.x) * point.y + (P2.x * P1.y - P1.x * P2.y));
}

// Largest inscribed circle of a contour: scan every pixel of the bounding
// rect and keep the one with the maximum signed distance to the contour
// (pointPolygonTest is positive only for interior points).
// center/radius are left untouched when no interior pixel exists.
void FindInnerCircleInContour(vector<Point> contour, Point& center, int& radius)
{
    Rect r = boundingRect(contour);
    int nL = r.x, nR = r.br().x; // the left and right boundaries of the contour
    int nT = r.y, nB = r.br().y; // the top and bottom boundaries of the contour
    double maxdist = 0;
    for (int i = nL; i < nR; i++)     // column
    {
        for (int j = nT; j < nB; j++) // row
        {
            // signed distance between the candidate point and the contour
            double dist = pointPolygonTest(contour, Point(i, j), true);
            if (dist > maxdist)
            {
                // keep the point with the maximum distance
                maxdist = dist;
                center = Point(i, j);
            }
        }
    }
    // the radius is the maximum inside distance (truncated to int, as before)
    radius = (int)maxdist;
}

// Average "chord" of a particle: mean distance from the centroid to a sample
// of its edge points, doubled and scaled to physical units.
//   listEdge    particle edge points (pixel coordinates)
//   a_PixelSize physical size of one pixel
//   dPartFTD    out: average diameter; 0 when it cannot be computed
// Returns FALSE when the edge list is degenerate (empty or zero-area
// moments, which would otherwise divide by zero), TRUE otherwise.
BOOL GetParticleAverageChord(std::vector<Point> listEdge, double a_PixelSize, double& dPartFTD)
{
    // safety check: moments of an empty/degenerate contour have m00 == 0
    if (listEdge.empty())
    {
        dPartFTD = 0;
        return FALSE;
    }
    Moments mu = moments(listEdge, false);
    if (mu.m00 == 0)
    {
        dPartFTD = 0;
        return FALSE;
    }
    // centroid of the contour
    double nx = mu.m10 / mu.m00;
    double ny = mu.m01 / mu.m00;
    Point ptCenter = Point((int)nx, (int)ny);

    // sample roughly one edge point in ten (one line per ~10 degrees) to
    // speed things up instead of measuring every radius
    int edgePointNum = (int)listEdge.size();
    int interval = (edgePointNum > 10) ? (edgePointNum / 10) : 1;

    double sumFltDiameter = 0;
    int radiusNum = 0;
    for (int i = 0; i < edgePointNum; i++)
    {
        if (i % interval == 0)
        {
            Point pt = listEdge[i];
            double dx = abs(pt.x - ptCenter.x);
            double dy = abs(pt.y - ptCenter.y);
            sumFltDiameter += sqrt(dx * dx + dy * dy);
            radiusNum += 1;
        }
    }
    if (radiusNum == 0)
    {
        dPartFTD = 0;
    }
    else
    {
        // mean radius * 2 = diameter, scaled to physical units
        dPartFTD = a_PixelSize * sumFltDiameter / radiusNum * 2;
    }
    return TRUE;
}

// 5-point linear smoothing with special weights at the two ends.
// wordIn/wordOut must hold at least N elements; N is clamped to the internal
// 256-element work buffers (the original unconditionally read 256 entries
// from wordIn, an out-of-bounds read for shorter inputs).
void linearSmooth5(WORD wordIn[], WORD wordOut[], int N = 255)
{
    if (N <= 0)
    {
        return;
    }
    if (N > 256)
    {
        N = 256; // guard the fixed-size work buffers
    }
    double in[256];
    double out[256];
    for (int i = 0; i < N; i++)
    {
        in[i] = (double)wordIn[i];
    }
    if (N < 5)
    {
        // too short to smooth: pass through unchanged
        for (int i = 0; i <= N - 1; i++)
        {
            out[i] = in[i];
        }
    }
    else
    {
        out[0] = (3.0 * in[0] + 2.0 * in[1] + in[2] - in[4]) / 5.0;
        out[1] = (4.0 * in[0] + 3.0 * in[1] + 2 * in[2] + in[3]) / 10.0;
        for (int i = 2; i <= N - 3; i++)
        {
            out[i] = (in[i - 2] + in[i - 1] + in[i] + in[i + 1] + in[i + 2]) / 5.0;
        }
        out[N - 2] = (4.0 * in[N - 1] + 3.0 * in[N - 2] + 2 * in[N - 3] + in[N - 4]) / 10.0;
        out[N - 1] = (3.0 * in[N - 1] + 2.0 * in[N - 2] + in[N - 3] - in[N - 5]) / 5.0;
    }
    for (int i = 0; i < N; i++)
    {
        wordOut[i] = (WORD)out[i];
    }
}

// Gaussian-blur a BSE image in place: the Mat header wraps the image buffer
// without copying, so the filtered pixels land directly back in inImg.
void BlurImage(CBSEImgPtr inImg)
{
    int cols = inImg->GetWidth();
    int rows = inImg->GetHeight();
    BYTE* pPixel = inImg->GetImageDataPointer();
    Mat cvcopyImg = Mat(rows, cols, CV_8UC1, pPixel); // aliases the BSE buffer
    cv::GaussianBlur(cvcopyImg, cvcopyImg, Size(5, 5), 2);
}

// Wrap the BSE image buffer in a cv::Mat header (no pixel copy).
// NOTE(review): the returned Mat aliases inImg's buffer -- it must not
// outlive the CBSEImg it came from.
Mat GetMatDataFromBseImg(CBSEImgPtr inImg)
{
    int cols = inImg->GetWidth();
    int rows = inImg->GetHeight();
    BYTE* pPixel = inImg->GetImageDataPointer();
    return Mat(rows, cols, CV_8UC1, pPixel);
}

// Copy a single-channel 8-bit Mat into a newly allocated CBSEImg.
// NOTE(review): inImg.data is walked as one contiguous block -- assumes the
// Mat is continuous (true for freshly allocated Mats; confirm for ROIs).
CBSEImgPtr GetBSEImgFromMat(Mat inImg)
{
    CBSEImgPtr bse = CBSEImgPtr(new CBSEImg(CRect(0, 0, inImg.cols, inImg.rows)));
    BYTE* pPixel = inImg.data;
    bse->SetImageData(pPixel, inImg.cols, inImg.rows);
    return bse;
}

/***********************************************************
The enhancement algorithm is based on the proportion of each gray value in
the entire image. The proportion of all gray values less than the current
gray value in the total pixels is used as a gain factor to adjust each pixel.
Since the gain factor of each value is the sum of the proportions of all
values less than it, the image after enhancement has brighter brights and
darker darks.
************************************************************/ void ImageStretchByHistogram(const Mat& src, Mat& dst) { //judge the size of the two images if (!(src.size().width == dst.size().width)) { cout << "error" << endl; return; } double p[256], p1[256], num[256]; memset(p, 0, sizeof(p)); memset(p1, 0, sizeof(p1)); memset(num, 0, sizeof(num)); int height = src.size().height; int width = src.size().width; long wMulh = height * width; //statistics of each gray value in the image for (int x = 0; x < width; x++) { for (int y = 0; y < height; y++) { uchar v = src.at(y, x); num[v]++; } } //using the number of each gray value to calculate the proportion of each gray value in the total pixels for (int i = 0; i < 256; i++) { p[i] = num[i] / wMulh; } //calculate the cumulative distribution function //p1[i]=sum(p[j]); j<=i; for (int i = 0; i < 256; i++) { for (int k = 0; k <= i; k++) p1[i] += p[k]; } //using the cumulative distribution function to adjust the pixel value for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++) { uchar v = src.at(y, x); dst.at(y, x) = p1[v] * 255 + 0.5; } } return; } //adjust the contrast of the image Mat AdjustContrastY(const Mat& img) { Mat out = Mat::zeros(img.size(), CV_8UC1); Mat workImg = img.clone(); //enhance the image ImageStretchByHistogram(workImg, out); return Mat(out); } void CVRemoveBG(const cv::Mat& img, cv::Mat& dst,int bgstart,int bgend/*, long& nNumParticle*/) { int min_gray = bgstart; int max_gray = bgend; if (img.empty()) { std::cout << "empty image"; return; } Mat image = img.clone(); if (image.channels() != 1) { cv::cvtColor(image, image, cv::COLOR_BGR2GRAY); } //lut: lookup table,exclude the gray value less than min_gray and greater than max_gray uchar lutvalues[256]; for (int i = 0; i < 256; i++) { if (i <= min_gray || i >= max_gray) { lutvalues[i] = 255; /*nNumParticle++;*/ } else { lutvalues[i] = 0; } } cv::Mat lutpara(1, 256, CV_8UC1, lutvalues); cv::LUT(image, lutpara, image); cv::Mat out_fill0, 
out_fill; //open calculation cv::morphologyEx(image, out_fill0, cv::MorphTypes::MORPH_OPEN, cv::getStructuringElement(0, cv::Size(5, 1)), cv::Point(-1, -1), 1); cv::morphologyEx(image, out_fill, cv::MorphTypes::MORPH_OPEN, cv::getStructuringElement(0, cv::Size(1, 5)), cv::Point(-1, -1), 1); out_fill = out_fill + out_fill0; //close calculation cv::morphologyEx(out_fill, out_fill, cv::MorphTypes::MORPH_CLOSE, cv::getStructuringElement(0, cv::Size(3, 3)), cv::Point(-1, -1), 1); //binary thresholding cv::threshold(out_fill, out_fill, 1, 255, cv::ThresholdTypes::THRESH_BINARY); dst = out_fill.clone(); } void RemoveBG_old(const cv::Mat& img, cv::Mat& dst, int nBGStart, int nBGEnd,long& nNumParticle) { int w, h; w = img.cols; h = img.rows; BYTE* pSrcImg = img.data; BYTE* pPixel = new BYTE[w * h]; BYTE* pTempImg = new BYTE[w * h]; for (unsigned int i = 0; i < w*h; i++) { if (pSrcImg[i] < nBGStart || pSrcImg[i] > nBGEnd) { pPixel[i] = 255; nNumParticle++; } else { pPixel[i] = 0; } } int errodDilateParam =5; if (errodDilateParam > 0) { BErode3(pPixel, pTempImg, errodDilateParam, h, w); BDilate3(pTempImg, pPixel, errodDilateParam, h, w); } dst.data = pPixel; delete[] pTempImg; } void AutoRemove_background_OTS(const cv::Mat& img, cv::Mat& dst, int black_thing, int min_size, int min_gray) { if (img.empty()) { return; } Mat image = img.clone(); if (image.channels() != 1) { cv::cvtColor(image, image, cv::COLOR_BGR2GRAY); } cv::Scalar mean, std; cv::meanStdDev(image, mean, std); auto a = mean[0]; auto d = std[0]; bool direct_binary = false; if (a > 240)//bright background, dark particle;direct binary process ;particular case { direct_binary = true; } bool both_black_bright = false; auto parame0 = black_thing; auto parame1 = min_size; auto parame2 = min_gray; if (parame0 == 2) { both_black_bright = true; } //adaptive manifold filter cv::Ptr pAdaptiveManifoldFilter = cv::ximgproc::createAMFilter(3.0, 0.1, true); cv::Mat temp1, dst_adapt; cv::Mat out_thresh;//get the binary image of 
the extracted particle if (direct_binary) { int min = 30; int thre = a - d - 50; if ((a - d - 50) < 30) { thre = min; } cv::threshold(image, out_thresh, thre, 255, cv::ThresholdTypes::THRESH_BINARY_INV); } else { cv::GaussianBlur(image, temp1, cv::Size(3, 3), 1.0, 1.0); pAdaptiveManifoldFilter->filter(temp1, dst_adapt, image); //dst_adapt = image; cv::ThresholdTypes img_ThresholdTypes = cv::ThresholdTypes::THRESH_BINARY_INV; cv::Mat image_Negate; if (both_black_bright) { //get the dark object cv::Mat black_t; int min_gray = 0; float segma_b = 1.5; int max_gray = int(a - d * segma_b); max_gray = std::min(max_gray, 255); uchar lutvalues[256]; for (int i = 0; i < 256; i++) { if (i >= min_gray && i <= max_gray) { lutvalues[i] = 255; } else { lutvalues[i] = 0; } } cv::Mat lutpara(1, 256, CV_8UC1, lutvalues); cv::LUT(dst_adapt, lutpara, black_t); //get the bright object cv::Mat bright_t; int min_gray_bright = int(a + d * segma_b); int max_gray_bright = 255; min_gray_bright = std::max(min_gray_bright, 120); uchar lutvalues1[256]; for (int i = 0; i < 256; i++) { if (i >= min_gray_bright && i <= max_gray_bright) { lutvalues1[i] = 255; } else { lutvalues1[i] = 0; } } cv::Mat lutpara1(1, 256, CV_8UC1, lutvalues1); cv::LUT(dst_adapt, lutpara1, bright_t); out_thresh = black_t + bright_t; //cv::threshold(out_thresh, out_thresh, 1, 255, cv::ThresholdTypes::THRESH_BINARY); } else { //convert the image to its negative image if (!direct_binary && (parame0 == 0))//dark particle,bright background { image_Negate = image; } else { dst_adapt = ~dst_adapt; image_Negate = ~image; } //triangle thresholding auto result_THRESH_TRIANGLE = cv::threshold(dst_adapt, out_thresh, 100, 255, cv::ThresholdTypes::THRESH_TRIANGLE | img_ThresholdTypes); cv::Mat extractedImage; cv::bitwise_and(image_Negate, image_Negate, extractedImage, out_thresh = out_thresh > 0); // 使用mask > 0将mask转换为二值图像 //calculate the mean and std of the extracted image cv::Scalar mean1, std1; cv::meanStdDev(extractedImage, mean1, 
std1, out_thresh); auto mean0 = mean1[0]; auto std0 = std1[0]; // binaryImage;remove the pixels greater than the threshold cv::Mat binaryImage = cv::Mat::zeros(image_Negate.size(), image_Negate.type()); //the filter coefficient int segma = 4; float filter_gray = (mean0 + std0 / segma); //filter_gray = result_THRESH_TRIANGLE; for (int y = 0; y < extractedImage.rows; ++y) { for (int x = 0; x < extractedImage.cols; ++x) { if (extractedImage.at(y, x) >= 1 && extractedImage.at(y, x) <= (int)(filter_gray)) { binaryImage.at(y, x) = 255; //set to white(255) } } } //get the less than parame2(default 30)area cv::Mat thing_area; cv::threshold(image_Negate, thing_area, parame2, 255, img_ThresholdTypes); //out_thresh = binaryImage ; out_thresh = binaryImage + thing_area; } } cv::Mat img_draw = cv::Mat::zeros(image.size(), CV_8UC3); //get the connected components //random color cv::RNG rng(10086); cv::Mat labels, stats, controids; int number = cv::connectedComponentsWithStats(out_thresh, labels, stats, controids, 8, CV_16U); std::vector colors; vector draw_indexs; for (int i = 0; i < number; i++) { cv::Vec3b color = cv::Vec3b(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256)); colors.emplace_back(color); auto area = stats.at(i, CC_STAT_AREA); if (area < parame1) { continue; } draw_indexs.push_back(i); } //color the connected components int w = img_draw.cols; int h = img_draw.rows; cv::Vec3b color = cv::Vec3b(0, 0, 255); for (int row = 0; row < h; row++) { for (int col = 0; col < w; col++) { int label = labels.at(row, col); if (label == 0) { continue; } auto it = std::find(draw_indexs.begin(), draw_indexs.end(), label); if (it != draw_indexs.end()) { img_draw.at(row, col) = color; } } } //color the particle on the original image //cv::Mat img_blend; //double alpha = 0.7; // set the weight of img1 //double beta = 1 - alpha; // calculate the weight of img2 //cv::cvtColor(image, image, cv::COLOR_GRAY2BGR); //cv::addWeighted(image, alpha, img_draw, beta, 0.0, img_blend); 
//dst = img_blend.clone(); //binary image vector outs; cv::split(img_draw, outs); dst = outs[2].clone(); }