BaseFunction.cpp

#include "stdafx.h"
#include "BaseFunction.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include "OTSParticle.h"
#include "OTSImageProcessParam.h"
#include <OTSFieldData.h>
#include "OTSMorphology.h"
#include <opencv2/ximgproc/edge_filter.hpp>

using namespace cv;
using namespace std;
using namespace OTSDATA;
/***** Distance between two points *****/
float getDistance(Point pointO, Point pointA)
{
    float distance;
    distance = powf((pointO.x - pointA.x), 2) + powf((pointO.y - pointA.y), 2);
    distance = sqrtf(distance);
    return distance;
}
/***** Distance from a point to a line: from P to line AB *****/
// P is a point off the line; A and B are the two endpoints of the segment
float getDist_P2L(Point pointP, Point pointA, Point pointB)
{
    // line equation Ax + By + C = 0 through A and B
    int A = 0, B = 0, C = 0;
    A = pointA.y - pointB.y;
    B = pointB.x - pointA.x;
    C = pointA.x * pointB.y - pointA.y * pointB.x;
    // point-to-line distance formula
    float distance = 0;
    distance = ((float)abs(A * pointP.x + B * pointP.y + C)) / ((float)sqrtf(A * A + B * B));
    return distance;
}
// Side test: the sign of the result tells which side of the directed line P1->P2
// the point lies on (zero means the point is on the line).
int Side(Point P1, Point P2, Point point)
{
    return ((P2.y - P1.y) * point.x + (P1.x - P2.x) * point.y + (P2.x * P1.y - P1.x * P2.y));
}
// Largest inscribed circle of a contour: scan the bounding box and keep the
// interior point with the greatest distance to the contour.
void FindInnerCircleInContour(vector<Point> contour, Point& center, int& radius)
{
    Rect r = boundingRect(contour);
    int nL = r.x, nR = r.br().x; // left/right bounds of the contour
    int nT = r.y, nB = r.br().y; // top/bottom bounds of the contour
    double dist = 0;
    double maxdist = 0;
    for (int i = nL; i < nR; i++) // columns
    {
        for (int j = nT; j < nB; j++) // rows
        {
            // signed distance from this interior point to the nearest contour point
            dist = pointPolygonTest(contour, Point(i, j), true);
            if (dist > maxdist)
            {
                // the maximum distance is reached at the most central interior point
                maxdist = dist;
                center = Point(i, j);
            }
        }
    }
    radius = maxdist; // circle radius
}
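
// Usage sketch (illustrative, not part of the original module): feeding
// FindInnerCircleInContour from findContours output. `bin` is an assumed
// 8-bit binary mask containing at least one particle.
static void ExampleInnerCircle(const Mat& bin)
{
    vector<vector<Point>> contours;
    Mat work = bin.clone();                       // findContours may modify its input
    findContours(work, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
    if (contours.empty()) return;
    size_t largest = 0;                           // pick the biggest contour as the example particle
    for (size_t i = 1; i < contours.size(); i++)
        if (contourArea(contours[i]) > contourArea(contours[largest])) largest = i;
    Point center;
    int radius = 0;
    FindInnerCircleInContour(contours[largest], center, radius);
    //circle(work, center, radius, Scalar(128), 1); // optional visual check
}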
BOOL GetParticleAverageChord(std::vector<Point> listEdge, double a_PixelSize, double& dPartFTD)
{
    // safety check: an empty edge list or degenerate moments would divide by zero below
    if (listEdge.empty())
    {
        dPartFTD = 0;
        return FALSE;
    }
    Moments mu = moments(listEdge, false);
    if (mu.m00 == 0)
    {
        dPartFTD = 0;
        return FALSE;
    }
    double nx = mu.m10 / mu.m00;
    double ny = mu.m01 / mu.m00;
    //circle(cvcopyImg, Point(nx, ny), 1, (255), 1);
    Point ptCenter = Point((int)nx, (int)ny);
    // coordinate transformation
    Point ptPosition;
    int radiusNum = 0;
    // get Feret diameter
    double sumFltDiameter = 0;
    int interval;
    int edgePointNum = listEdge.size();
    if (edgePointNum > 10)
    {
        interval = edgePointNum / 10; // sample roughly one radius per tenth of the edge points
    }
    else
    {
        interval = 1;
    }
    for (int i = 0; i < edgePointNum; i++)
    {
        Point pt = listEdge[i];
        ptPosition.x = abs(pt.x - ptCenter.x);
        ptPosition.y = abs(pt.y - ptCenter.y);
        if (i % interval == 0) // only measure every interval-th radius, to speed things up
        {
            double r1 = sqrt(pow(ptPosition.x, 2) + pow(ptPosition.y, 2));
            sumFltDiameter += r1;
            radiusNum += 1;
            //line(cvImageData, ptCenter, pt, Scalar(nBlackColor), nThickness, nLineType);
        }
    }
    if (radiusNum == 0)
    {
        dPartFTD = 0;
    }
    else
    {
        dPartFTD = a_PixelSize * sumFltDiameter / radiusNum * 2; // mean radius * 2 = mean diameter
    }
    //imshow("feret center", cvImageData);
    return TRUE;
}
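
// Usage sketch (illustrative): average chord of one particle from its outline
// points. `edge` would typically come from findContours on a single-particle
// mask; the 0.5 um/pixel calibration is an assumed value.
static double ExampleAverageChord(const vector<Point>& edge)
{
    double dFTD = 0;
    const double pixelSizeUm = 0.5;               // assumed pixel size
    GetParticleAverageChord(edge, pixelSizeUm, dFTD);
    return dFTD;                                  // average diameter in micrometres
}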
void linearSmooth5(WORD wordIn[], WORD wordOut[], int N = 255) // 5-point linear smoothing (N must not exceed 256)
{
    double in[256];
    double out[256];
    for (int i = 0; i < 256; i++)
    {
        in[i] = (double)wordIn[i];
    }
    int i;
    if (N < 5)
    {
        for (i = 0; i <= N - 1; i++)
        {
            out[i] = in[i];
        }
    }
    else
    {
        out[0] = (3.0 * in[0] + 2.0 * in[1] + in[2] - in[4]) / 5.0;
        out[1] = (4.0 * in[0] + 3.0 * in[1] + 2 * in[2] + in[3]) / 10.0;
        for (i = 2; i <= N - 3; i++)
        {
            out[i] = (in[i - 2] + in[i - 1] + in[i] + in[i + 1] + in[i + 2]) / 5.0;
        }
        out[N - 2] = (4.0 * in[N - 1] + 3.0 * in[N - 2] + 2 * in[N - 3] + in[N - 4]) / 10.0;
        out[N - 1] = (3.0 * in[N - 1] + 2.0 * in[N - 2] + in[N - 3] - in[N - 5]) / 5.0;
    }
    for (int i = 0; i < N; i++)
    {
        wordOut[i] = (WORD)out[i];
    }
}
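
// Usage sketch (illustrative): smoothing a 256-bin gray-level histogram in
// place before further analysis. `hist` is an assumed, caller-filled array.
static void ExampleSmoothHistogram(WORD hist[256])
{
    WORD smoothed[256] = { 0 };
    linearSmooth5(hist, smoothed, 256);           // N must stay <= 256
    memcpy(hist, smoothed, sizeof(smoothed));
}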
void BlurImage(CBSEImgPtr inImg)
{
    int rows, cols;
    cols = inImg->GetWidth();
    rows = inImg->GetHeight();
    BYTE* pPixel = inImg->GetImageDataPointer();
    Mat cvcopyImg = Mat(rows, cols, CV_8UC1, pPixel); // wraps the BSE buffer in place (no copy)
    //Mat blurImg;
    //medianBlur(cvcopyImg, cvcopyImg, 11); // get rid of noise points
    //cv::bilateralFilter
    cv::GaussianBlur(cvcopyImg, cvcopyImg, Size(5, 5), 2); // in-place, so the result lands in the BSE buffer
    //inImg->SetImageData(cvcopyImg.data, width, height);
    /*outImg = inImg;*/
}
Mat GetMatDataFromBseImg(CBSEImgPtr inImg)
{
    int rows, cols;
    cols = inImg->GetWidth();
    rows = inImg->GetHeight();
    BYTE* pPixel = inImg->GetImageDataPointer();
    Mat cvcopyImg = Mat(rows, cols, CV_8UC1, pPixel);
    return cvcopyImg;
}
CBSEImgPtr GetBSEImgFromMat(Mat inImg)
{
    CBSEImgPtr bse = CBSEImgPtr(new CBSEImg(CRect(0, 0, inImg.cols, inImg.rows)));
    BYTE* pPixel = inImg.data;
    bse->SetImageData(pPixel, inImg.cols, inImg.rows);
    return bse;
}
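
// Usage sketch (illustrative): round trip between the OTS BSE wrapper and
// cv::Mat. GetMatDataFromBseImg wraps the existing pixel buffer without
// copying, so the returned Mat is only valid while `img` stays alive;
// GetBSEImgFromMat builds a new CBSEImg from the Mat data.
static CBSEImgPtr ExampleBlurRoundTrip(CBSEImgPtr img)
{
    Mat view = GetMatDataFromBseImg(img);         // shares the BSE buffer
    Mat smoothed;
    cv::GaussianBlur(view, smoothed, Size(5, 5), 2);
    return GetBSEImgFromMat(smoothed);            // new wrapper built from the blurred data
}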
/***********************************************************
The enhancement first measures what fraction of the whole image each gray
level occupies. For each pixel, the cumulative proportion of all gray levels
at or below its value is then used as the gain, i.e.
dst(x, y) = 255 * CDF(src(x, y)). Because every value's gain is the summed
proportion of everything at or below it, the stretched image makes bright
regions brighter and dark regions darker.
************************************************************/
void ImageStretchByHistogram(const Mat& src, Mat& dst)
{
    // sanity-check the inputs: dst must already be allocated with the same size as src
    if (src.size() != dst.size())
    {
        cout << "ImageStretchByHistogram: src/dst size mismatch" << endl;
        return;
    }
    double p[256], p1[256], num[256];
    memset(p, 0, sizeof(p));
    memset(p1, 0, sizeof(p1));
    memset(num, 0, sizeof(num));
    int height = src.size().height;
    int width = src.size().width;
    long wMulh = height * width;
    // count how many pixels take each gray level
    for (int x = 0; x < width; x++)
    {
        for (int y = 0; y < height; y++)
        {
            uchar v = src.at<uchar>(y, x);
            num[v]++;
        }
    }
    // turn the counts into each gray level's proportion of the total pixel count
    for (int i = 0; i < 256; i++)
    {
        p[i] = num[i] / wMulh;
    }
    // for each gray level, accumulate the proportion of all gray levels at or below it
    // p1[i] = sum(p[j]); j <= i;
    for (int i = 0; i < 256; i++)
    {
        for (int k = 0; k <= i; k++)
            p1[i] += p[k];
    }
    // use that cumulative proportion as the gain for every pixel
    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++) {
            uchar v = src.at<uchar>(y, x);
            dst.at<uchar>(y, x) = p1[v] * 255 + 0.5;
        }
    }
    return;
}
// Adjust image contrast
Mat AdjustContrastY(const Mat& img)
{
    Mat out = Mat::zeros(img.size(), CV_8UC1);
    Mat workImg = img.clone();
    // contrast enhancement via the histogram stretch above
    ImageStretchByHistogram(workImg, out);
    return out;
}
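
// Reference sketch (illustrative): the stretch above is essentially the CDF
// mapping behind histogram equalization, so cv::equalizeHist gives a closely
// related result and can serve as a rough cross-check; differences come from
// OpenCV's normalization and rounding. `gray8u` is an assumed 8-bit grayscale input.
static void ExampleCompareWithEqualizeHist(const Mat& gray8u)
{
    Mat stretched = AdjustContrastY(gray8u);
    Mat equalized;
    cv::equalizeHist(gray8u, equalized);
    Mat diff;
    cv::absdiff(stretched, equalized, diff);      // inspect diff to compare the two mappings
}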
void CVRemoveBG(const cv::Mat& img, cv::Mat& dst, int bgstart, int bgend/*, long& nNumParticle*/)
{
    int min_gray = bgstart;
    int max_gray = bgend;
    if (img.empty())
    {
        std::cout << "CVRemoveBG: empty image";
        return;
    }
    Mat image = img.clone();
    if (image.channels() != 1)
    {
        cv::cvtColor(image, image, cv::COLOR_BGR2GRAY);
    }
    // LUT: gray levels outside the window (bgstart, bgend) are treated as foreground;
    // this covers the all-255 border areas produced by stitching as well as the low-gray regions
    uchar lutvalues[256];
    for (int i = 0; i < 256; i++)
    {
        if (i <= min_gray || i >= max_gray)
        {
            lutvalues[i] = 255;
            /*nNumParticle++;*/
        }
        else
        {
            lutvalues[i] = 0;
        }
    }
    cv::Mat lutpara(1, 256, CV_8UC1, lutvalues);
    cv::LUT(image, lutpara, image);
    cv::Mat out_fill0, out_fill;
    // opening: keep runs of at least 5 px, horizontally and vertically
    cv::morphologyEx(image, out_fill0, cv::MorphTypes::MORPH_OPEN, cv::getStructuringElement(0, cv::Size(5, 1)), cv::Point(-1, -1), 1);
    cv::morphologyEx(image, out_fill, cv::MorphTypes::MORPH_OPEN, cv::getStructuringElement(0, cv::Size(1, 5)), cv::Point(-1, -1), 1);
    out_fill = out_fill + out_fill0;
    // closing
    cv::morphologyEx(out_fill, out_fill, cv::MorphTypes::MORPH_CLOSE, cv::getStructuringElement(0, cv::Size(3, 3)), cv::Point(-1, -1), 1);
    // binarize
    cv::threshold(out_fill, out_fill, 1, 255, cv::ThresholdTypes::THRESH_BINARY);
    dst = out_fill.clone();
}
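
// Pipeline sketch (illustrative): a plausible way to chain the helpers above
// for background removal. The background gray window [bgStart, bgEnd] is an
// assumed calibration value, not one prescribed by this module.
static Mat ExampleRemoveBackground(const Mat& gray8u)
{
    Mat enhanced = AdjustContrastY(gray8u);       // optional contrast stretch
    Mat mask;
    const int bgStart = 40;                       // assumed background window
    const int bgEnd = 200;
    CVRemoveBG(enhanced, mask, bgStart, bgEnd);   // particles become 255, background 0
    return mask;
}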
void RemoveBG_old(const cv::Mat& img, cv::Mat& dst, int nBGStart, int nBGEnd, long& nNumParticle)
{
    int w, h;
    w = img.cols;
    h = img.rows;
    BYTE* pSrcImg = img.data;
    BYTE* pPixel = new BYTE[w * h];
    BYTE* pTempImg = new BYTE[w * h];
    for (int i = 0; i < w * h; i++)
    {
        if (pSrcImg[i] < nBGStart || pSrcImg[i] > nBGEnd)
        {
            pPixel[i] = 255;
            nNumParticle++;
        }
        else
        {
            pPixel[i] = 0;
        }
    }
    int errodDilateParam = 5;
    if (errodDilateParam > 0)
    {
        BErode3(pPixel, pTempImg, errodDilateParam, h, w);
        BDilate3(pTempImg, pPixel, errodDilateParam, h, w);
    }
    // copy the result into dst so the Mat owns its pixels, then release the work buffers
    dst = Mat(h, w, CV_8UC1, pPixel).clone();
    delete[] pPixel;
    delete[] pTempImg;
}
void AutoRemove_background_OTS(const cv::Mat& img, cv::Mat& dst, int black_thing, int min_size, int min_gray)
{
    if (img.empty())
    {
        //ui.statusBar->showMessage(QString("empty image"));
        return;
    }
    Mat image = img.clone();
    if (image.channels() != 1)
    {
        cv::cvtColor(image, image, cv::COLOR_BGR2GRAY);
    }
    cv::Scalar mean, std;
    cv::meanStdDev(image, mean, std);
    auto a = mean[0];
    auto d = std[0];
    bool direct_binary = false;
    if (a > 240) // special case: fully bright background with dark particles, extract directly by thresholding
    {
        direct_binary = true;
    }
    bool both_black_bright = false;
    auto parame0 = black_thing;
    auto parame1 = min_size;
    auto parame2 = min_gray;
    if (parame0 == 2)
    {
        both_black_bright = true;
    }
    // adaptive manifold filter (edge-preserving smoothing)
    cv::Ptr<cv::ximgproc::AdaptiveManifoldFilter> pAdaptiveManifoldFilter
        = cv::ximgproc::createAMFilter(3.0, 0.1, true);
    cv::Mat temp1, dst_adapt;
    cv::Mat out_thresh; // binary foreground mask
    if (direct_binary)
    {
        int min = 30;
        int thre = a - d - 50;
        if ((a - d - 50) < 30)
        {
            thre = min;
        }
        cv::threshold(image, out_thresh, thre, 255, cv::ThresholdTypes::THRESH_BINARY_INV);
    }
    else
    {
        cv::GaussianBlur(image, temp1, cv::Size(3, 3), 1.0, 1.0);
        pAdaptiveManifoldFilter->filter(temp1, dst_adapt, image);
        //dst_adapt = image;
        cv::ThresholdTypes img_ThresholdTypes = cv::ThresholdTypes::THRESH_BINARY_INV;
        cv::Mat image_Negate;
        if (both_black_bright)
        {
            // extract the dark objects
            cv::Mat black_t;
            int min_gray_dark = 0;
            float segma_b = 1.5;
            int max_gray = int(a - d * segma_b);
            max_gray = std::min(max_gray, 255);
            uchar lutvalues[256];
            for (int i = 0; i < 256; i++)
            {
                if (i >= min_gray_dark && i <= max_gray)
                {
                    lutvalues[i] = 255;
                }
                else
                {
                    lutvalues[i] = 0;
                }
            }
            cv::Mat lutpara(1, 256, CV_8UC1, lutvalues);
            cv::LUT(dst_adapt, lutpara, black_t);
            // extract the bright objects
            cv::Mat bright_t;
            int min_gray_bright = int(a + d * segma_b);
            int max_gray_bright = 255;
            min_gray_bright = std::max(min_gray_bright, 120);
            uchar lutvalues1[256];
            for (int i = 0; i < 256; i++)
            {
                if (i >= min_gray_bright && i <= max_gray_bright)
                {
                    lutvalues1[i] = 255;
                }
                else
                {
                    lutvalues1[i] = 0;
                }
            }
            cv::Mat lutpara1(1, 256, CV_8UC1, lutvalues1);
            cv::LUT(dst_adapt, lutpara1, bright_t);
            out_thresh = black_t + bright_t;
            //cv::threshold(out_thresh, out_thresh, 1, 255, cv::ThresholdTypes::THRESH_BINARY);
        }
        else
        {
            // normalize so the objects to extract are dark on a bright background
            if (!direct_binary && (parame0 == 0)) // dark objects on a dark background
            {
                image_Negate = image;
            }
            else
            {
                dst_adapt = ~dst_adapt;
                image_Negate = ~image;
            }
            // triangle threshold
            auto result_THRESH_TRIANGLE = cv::threshold(dst_adapt, out_thresh, 100, 255, cv::ThresholdTypes::THRESH_TRIANGLE | img_ThresholdTypes);
            cv::Mat extractedImage;
            out_thresh = out_thresh > 0; // force the mask to a strict 0/255 binary image
            cv::bitwise_and(image_Negate, image_Negate, extractedImage, out_thresh);
            // mean and standard deviation of the extracted region
            cv::Scalar mean1, std1;
            cv::meanStdDev(extractedImage, mean1, std1, out_thresh);
            auto mean0 = mean1[0];
            auto std0 = std1[0];
            // binaryImage: binary mask with the over-grown parts of the region removed
            cv::Mat binaryImage = cv::Mat::zeros(image_Negate.size(), image_Negate.type());
            // filtering coefficient
            int segma = 4;
            float filter_gray = (mean0 + std0 / segma);
            //filter_gray = result_THRESH_TRIANGLE;
            for (int y = 0; y < extractedImage.rows; ++y) {
                for (int x = 0; x < extractedImage.cols; ++x) {
                    if (extractedImage.at<uchar>(y, x) >= 1 && extractedImage.at<uchar>(y, x) <= (int)(filter_gray)) {
                        binaryImage.at<uchar>(y, x) = 255; // set to white (255)
                    }
                }
            }
            // also extract the regions whose gray value is below parame2 (default 30) directly
            cv::Mat thing_area;
            cv::threshold(image_Negate, thing_area, parame2, 255, img_ThresholdTypes);
            //out_thresh = binaryImage ;
            out_thresh = binaryImage + thing_area;
        }
    }
    cv::Mat img_draw = cv::Mat::zeros(image.size(), CV_8UC3);
    // connected-component filtering and particle drawing
    // random colors
    cv::RNG rng(10086);
    cv::Mat labels, stats, centroids;
    int number = cv::connectedComponentsWithStats(out_thresh, labels, stats, centroids, 8, CV_16U);
    std::vector<cv::Vec3b> colors;
    vector<int> draw_indexs;
    for (int i = 0; i < number; i++)
    {
        cv::Vec3b color = cv::Vec3b(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));
        colors.emplace_back(color);
        auto area = stats.at<int>(i, CC_STAT_AREA);
        if (area < parame1)
        {
            continue;
        }
        draw_indexs.push_back(i);
    }
    // color the kept components and filter out the rest
    int w = img_draw.cols;
    int h = img_draw.rows;
    cv::Vec3b color = cv::Vec3b(0, 0, 255);
    for (int row = 0; row < h; row++)
    {
        for (int col = 0; col < w; col++)
        {
            int label = labels.at<uint16_t>(row, col);
            if (label == 0)
            {
                continue;
            }
            auto it = std::find(draw_indexs.begin(), draw_indexs.end(), label);
            if (it != draw_indexs.end())
            {
                img_draw.at<Vec3b>(row, col) = color;
            }
        }
    }
    // blending the colors back onto the original image (disabled)
    //cv::Mat img_blend;
    //double alpha = 0.7;  // weight of img1
    //double beta = 1 - alpha;  // weight of img2
    //cv::cvtColor(image, image, cv::COLOR_GRAY2BGR);
    //cv::addWeighted(image, alpha, img_draw, beta, 0.0, img_blend);
    //dst = img_blend.clone();
    // binary output: the red channel of the drawing
    vector<cv::Mat> outs;
    cv::split(img_draw, outs);
    dst = outs[2].clone();
}
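
// Usage sketch (illustrative): calling the automatic background removal with
// assumed parameters. black_thing = 2 asks for both dark and bright objects,
// min_size = 20 drops components smaller than 20 px, and min_gray = 30 is the
// direct dark-extraction threshold; all three values are examples only.
static Mat ExampleAutoRemoveBackground(const Mat& gray8u)
{
    Mat particleMask;
    AutoRemove_background_OTS(gray8u, particleMask, 2, 20, 30);
    return particleMask;                          // binary particle mask (255 = particle)
}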