Commit 981e1f86 authored by Zhiping Jiang (蒋志平)

CoreOpenCVExtensions project init

parent aca63031
@@ -6,14 +6,8 @@
// Copyright © 2016 JiangZhping. All rights reserved.
//
#import <UIKit/UIKit.h>
//! Project version number for CoreOpenCVExtensions.
FOUNDATION_EXPORT double CoreOpenCVExtensionsVersionNumber;
//! Project version string for CoreOpenCVExtensions.
FOUNDATION_EXPORT const unsigned char CoreOpenCVExtensionsVersionString[];
// In this header, you should import all the public headers of your framework using statements like #import <CoreOpenCVExtensions/PublicHeader.h>
#import <CoreOpenCVExtensions/jzplib_bwOps.h>
#import <CoreOpenCVExtensions/jzplib_camera.h>
#import <CoreOpenCVExtensions/jzplib_draw.h>
#import <CoreOpenCVExtensions/jzplib_geom.h>
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>en</string>
<key>CFBundleExecutable</key>
<string>$(EXECUTABLE_NAME)</string>
<key>CFBundleIdentifier</key>
<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>$(PRODUCT_NAME)</string>
<key>CFBundlePackageType</key>
<string>FMWK</string>
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<key>CFBundleVersion</key>
<string>$(CURRENT_PROJECT_VERSION)</string>
<key>NSHumanReadableCopyright</key>
<string>Copyright © 2016 JiangZhping. All rights reserved.</string>
<key>NSPrincipalClass</key>
<string></string>
</dict>
</plist>
//
// jzplib_bwOps.cpp
// OPENCV_HOTSHOTS
//
// Created by Zhiping Jiang on 14-7-18.
//
//
#include "jzplib_bwOps.h"
namespace cve {
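// Fills interior holes in a binary image: only the external contours are kept and redrawn as filled blobs.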
Mat fillHoleInBinary(Mat bwImage) {
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours( bwImage.clone(), contours, hierarchy,
                  CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE );
    Mat mask(bwImage.size(), CV_8UC1, Scalar::all(0));
    drawContours(mask, contours, -1, Scalar(255), CV_FILLED);
    return mask;
}
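// Keeps only the largest external blob (by contour area); if no contour is found, the input is returned unchanged.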
Mat removeSmallBlobs(Mat bwImage) {
    vector<vector<Point> > contours, onlyContours(1);
    vector<Vec4i> hierarchy;
    findContours( bwImage.clone(), contours, hierarchy,
                  CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE );
    vector<double> areas(contours.size());
    if (contours.size() > 0) {
        for (size_t i = 0; i < contours.size(); i++) {
            areas[i] = contourArea(contours[i]);
            // cout<<areas[i]<<",";
        }
        long biggestIndex = distance(areas.begin(), max_element(areas.begin(), areas.end()));
        // cout<<biggestIndex<<":"<<areas[biggestIndex]<<endl;
        onlyContours[0] = contours[biggestIndex];
        Mat mask(bwImage.size(), CV_8UC1, Scalar::all(0));
        drawContours(mask, onlyContours, -1, Scalar(255), CV_FILLED);
        return mask;
    }
    return bwImage;
}
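// Replaces each contour by its filled convex hull, closing concavities in the binary image.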
Mat fillConvexHulls(Mat bwImage) {
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours( bwImage.clone(), contours, hierarchy,
                  CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE );
    // Find the convex hull object for each contour
    vector<vector<Point> > hull( contours.size() );
    for( int i = 0; i < contours.size(); i++ )
    {
        convexHull( Mat(contours[i]), hull[i], false );
    }
    Mat drawing = Mat::zeros( bwImage.size(), CV_8UC1 );
    for( int i = 0; i < contours.size(); i++ )
    {
        drawContours( drawing, contours, i, Scalar(255), CV_FILLED );
        drawContours( drawing, hull, i, Scalar(255), CV_FILLED );
    }
    return drawing;
}
}
//
// jzplib_camera.cpp
// JZP_EYE_TRACKING
//
// Created by Zhiping Jiang on 14-8-23.
//
//
#include "jzplib_camera.h"
#import "TargetConditionals.h"
namespace cve {
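// Loads cameraMatrix and distCoeffs from a cv::FileStorage file (YAML/XML); returns false if either entry is missing.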
bool readCameraProfile(const std::string & filepath, Mat & cameraMatrix, Mat & distCoeffs) {
    FileStorage fileLoader(filepath, FileStorage::READ);
    fileLoader["cameraMatrix"] >> cameraMatrix;
    fileLoader["distCoeffs"] >> distCoeffs;
    fileLoader.release();
    return !(cameraMatrix.empty() || distCoeffs.empty());
}
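// Adjusts the camera intrinsic matrix after the image has been cropped to currentRect and resized by resizeFactor.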
Mat cameraMatrixByCropNResize(const Mat originalCameraMatrix, Size originalSize, Rect currentRect, float resizeFactor) {
    Mat nowcm = originalCameraMatrix.clone();   // deep copy so the caller's matrix is left untouched
    cout<<nowcm.channels()<<" "<<nowcm.size()<<endl;
    // shift the principal point by the crop offset
    Point tl = currentRect.tl();
    nowcm.at<double>(0, 2) -= double(tl.x);
    nowcm.at<double>(1, 2) -= double(tl.y);
    // scale focal lengths and principal point, keeping the skew term and the homogeneous 1 unchanged
    double s = nowcm.at<double>(0, 1);
    nowcm *= resizeFactor;
    nowcm.at<double>(0, 1) = s;
    nowcm.at<double>(2, 2) = 1.0;
    cout<<nowcm<<endl;
    return nowcm;
}
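// Grabs one frame from the capture device, optionally mirrored horizontally; returns false when the device is closed or the read fails.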
bool captureImage(VideoCapture& capture, Mat& color_img, bool flipImage) {
    Mat origin;
    if (capture.isOpened() && capture.read(origin)) {
        if (flipImage)
            flip(origin, color_img, 1);   // mirror about the vertical axis
        else
            origin.copyTo(color_img);
        return true;
    } else {
        return false;
    }
}
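// Builds the ideal 3-D chessboard corner grid (z = 0), repeated once per calibration image.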
vector<vector<Point3f> > calcBoardCornerPositions(int gridW, int gridH, float squareSize, int imagesCount)
{
    vector<vector<Point3f> > objectPoints(imagesCount);
    for (int k = 0; k < imagesCount; k++) {
        objectPoints[k] = vector<Point3f>(0);
        for( int i = 0; i < gridH; i++ )
            for( int j = 0; j < gridW; j++ )
                objectPoints[k].push_back(Point3f(float( j*squareSize ), float( i*squareSize ), 0));
    }
    // objectPoints.resize(imagesCount,objectPoints[0]);
    // cout<<objectPoints.size()<<" "<<objectPoints[0].size()<<endl;
    return objectPoints;
}
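// Detects chessboard corners in each image, refines them to sub-pixel accuracy, and runs calibrateCamera to estimate the intrinsics and distortion coefficients.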
bool chessboardCameraCalibration(int gridW, int gridH, float gridSize, vector<std::string> imagePaths, Mat & cameraMatrix, Mat & distCoeffs, bool drawCorners) {
    vector<Mat> images;
    for (size_t i = 0; i < imagePaths.size(); i++) {
        cv::Mat image = imread(imagePaths[i]);
        if (image.cols < image.rows) {
            cv::transpose(image, image);   // bring portrait images to landscape orientation
        }
        images.push_back(image);
    }
    Size grids(gridW, gridH);
    vector<vector<Point2f> > imagePoints;
    vector<int> usefulImgIndeces;
    for (int i = 0; i < images.size(); i++) {
        vector<Point2f> corners;
        bool found = findChessboardCorners(images[i], grids, corners,
                                           CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE + CALIB_CB_FAST_CHECK);
        if (found) {
            usefulImgIndeces.push_back(i);
            Mat gray;
            cvtColor(images[i], gray, CV_BGR2GRAY);
            cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
                         TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
            imagePoints.push_back(corners);
            if (drawCorners) {
                drawChessboardCorners(images[i], grids, Mat(corners), found);
                imshow("chessboard"+std::to_string(i), images[i]);
                waitKey(200);
            }
        }
    }
    if (usefulImgIndeces.empty()) {
        cout<<"no chessboard found."<<endl;
        return false;
    }
    vector<vector<Point3f> > objectPoints = calcBoardCornerPositions(gridW, gridH, gridSize, (int)imagePoints.size());
    Size imageSize = images[0].size();
    vector<Mat> rvecs, tvecs;
    calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs);
    return true;
}
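/* Minimal usage sketch (the image paths and the 9x6 grid with 25 mm squares are illustrative placeholders):
 vector<std::string> paths = {"calib_01.jpg", "calib_02.jpg", "calib_03.jpg"};
 Mat cameraMatrix, distCoeffs;
 if (chessboardCameraCalibration(9, 6, 25.0f, paths, cameraMatrix, distCoeffs, false))
     cout << cameraMatrix << endl << distCoeffs << endl;
 */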
}
//
// jzplib_draw.cpp
// OPENCV_HOTSHOTS
//
// Created by Zhiping Jiang on 14-7-18.
//
//
#include "jzplib_draw.h"
namespace cve {
void drawBox(Mat& image, CvRect box, Scalar color, int thick){
rectangle( image, cvPoint(box.x, box.y), cvPoint(box.x+box.width,box.y+box.height),color, thick);
}
void drawConnectedPoints(Mat& image, vector<Point2f> points, Scalar color, int thick){
    // i + 1 < size() avoids the unsigned underflow of size()-1 on an empty vector
    for (size_t i = 0; i + 1 < points.size(); i++) {
        line(image, points[i], points[i+1], color, thick, CV_AA);
    }
}
void drawPoints(Mat& image, vector<Point2f> points, Scalar color){
    for( vector<Point2f>::const_iterator i = points.begin(), ie = points.end(); i != ie; ++i )
        circle(image, *i, 1, color, 2);
}
void drawPoints(Mat& image, vector<Point2d> points, Scalar color){
    for( vector<Point2d>::const_iterator i = points.begin(), ie = points.end(); i != ie; ++i )
        circle(image, *i, 1, color, 2);
}
void drawRotatedRect(Mat image, RotatedRect eyeRRect) {
Point2f rect_points[4];
eyeRRect.points( rect_points );
for( int j = 0; j < 4; j++ )
line( image, rect_points[j], rect_points[(j+1)%4], Scalar(0,0,255), 1, 8 );
}
void drawStringAtPoint(Mat img, const string text, Point position) {
    // dark shadow one pixel down-right, then the text itself in white on top
    putText(img, text, Point(position.x+1, position.y+1), FONT_HERSHEY_SIMPLEX, 0.5f,
            Scalar::all(0), 1, CV_AA);
    putText(img, text, position, FONT_HERSHEY_SIMPLEX, 0.5f,
            Scalar::all(255), 1, CV_AA);
}
void drawStringAtTopLeftCorner(Mat img,            // image to draw on
                               const string text)  // text to draw
{
    Size size = getTextSize(text, FONT_HERSHEY_TRIPLEX, 1.0f, 1, NULL);
    drawStringAtPoint(img, text, Point(1, size.height+1));
}
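// Visualizes a 2-D hue-saturation histogram (30 hue bins x 32 saturation bins) as a grid of gray-level squares in an "H-S Histogram" window.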
void drawColorHistGram(Mat hist) {
    double maxVal = 0;
    minMaxLoc(hist, 0, &maxVal, 0, 0);
    int hbins = 30, sbins = 32;
    int scale = 10;
    Mat histImg = Mat::zeros(sbins*scale, hbins*10, CV_8UC3);
    for( int h = 0; h < hbins; h++ )
        for( int s = 0; s < sbins; s++ )
        {
            float binVal = hist.at<float>(h, s);
            int intensity = cvRound(binVal*255/maxVal);
            rectangle( histImg, Point(h*scale, s*scale),
                       Point( (h+1)*scale - 1, (s+1)*scale - 1),
                       Scalar::all(intensity),
                       CV_FILLED );
        }
    namedWindow( "H-S Histogram", 1 );
    imshow( "H-S Histogram", histImg );
}
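// MATLAB-style imagesc: linearly rescales a single-channel float image to [0, 255] and displays it with the given color map.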
void imagesc(string windowName, Mat image, int colormap) {
    // expects a single-channel CV_32F image
    Mat display;
    float Amin = *min_element(image.begin<float>(), image.end<float>());
    float Amax = *max_element(image.begin<float>(), image.end<float>());
    Mat A_scaled = (image - Amin)/(Amax - Amin);
    A_scaled.convertTo(display, CV_8UC1, 255.0, 0);
    applyColorMap(display, display, colormap);   // honor the requested color map rather than always using JET
    imshow(windowName, display);
}
}
//
// jzplib_geom.cpp
// OPENCV_HOTSHOTS
//
// Created by Zhiping Jiang on 14-7-18.
//
//
#include "jzplib_geom.h"
namespace cve {
float rad2deg(float radian) {
return radian/CV_PI*180.0f;
}
float deg2rad(float degree) {
return degree/180.0f*CV_PI;
}
vector<Point2f> Mat2PointsVector(const Mat& mat) {
vector<Point2f> pts;
mat.copyTo(pts);
return pts;
}
//Point pointsCenter()
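// Tilt angle (in radians) of the line from left to right, measured against the horizontal; the sign follows the y difference.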
float tileRadianBtwn2Pts(Point2f left, Point2f right) {
return atan((right.y - left.y)/abs(right.x-left.x));
}
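// Head-tilt estimate (in degrees) from the four canthus points: a weighted blend of the two per-eye tilts and the averaged cross-eye tilts (50% / 25% / 25%).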
float calculateEyePairTileAngle(const vector<Point2f>& canthusPts) {
float tile1 = rad2deg(tileRadianBtwn2Pts(canthusPts[1], canthusPts[0]));
float tile2 = rad2deg(tileRadianBtwn2Pts(canthusPts[3], canthusPts[2]));
float tile3 = rad2deg(tileRadianBtwn2Pts(canthusPts[3], canthusPts[0]));
float tile4 = rad2deg(tileRadianBtwn2Pts(canthusPts[1], canthusPts[2]));
float tile5 = (tile3+tile4)/2.0f;
float tile6 = tile1*0.5f + tile2*0.25f + tile5*0.25f;
return tile6;
}
Point2f caculateEyePairCenter(const vector<Point2f>& canthusPts) {
    // center of the eye pair: midpoint of canthusPts[2] and canthusPts[3], kept in floating point
    return Point2f((canthusPts[2].x+canthusPts[3].x)/2.0f, (canthusPts[2].y+canthusPts[3].y)/2.0f);
}
Point2f rotatePointByRotationMatrix(Point2f src,Mat M) {
Point2f result;
result.x = M.at<double>(0,0)*src.x + M.at<double>(0,1)*src.y + M.at<double>(0,2);
result.y = M.at<double>(1,0)*src.x + M.at<double>(1,1)*src.y + M.at<double>(1,2);
return result;
}
vector<Point2f> rotatePointsByRotationMatrix(const vector<Point2f>& original,const Mat& RM) {
vector<Point2f> pts(original);
for (int i = 0 ; i < pts.size() ; i ++) {
pts[i] = rotatePointByRotationMatrix(pts[i], RM);
}
return pts;
}
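// Crops the content of a RotatedRect from the image: its bounding box is clamped to the image, rotated upright about the box center, and the exact-size patch is extracted with sub-pixel accuracy.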
cv::Mat cropRotatedRectFromImage(const cv::RotatedRect & rotatedRect, const cv::Mat & image) {
cv::Rect2f boundingRect = rotatedRect.boundingRect();
boundingRect.x = boundingRect.x >= 0 ? boundingRect.x : 0;
boundingRect.y = boundingRect.y >= 0 ? boundingRect.y : 0;
boundingRect.width = boundingRect.x+boundingRect.width < image.size().width ? boundingRect.width : image.size().width - boundingRect.x;
boundingRect.height = boundingRect.y+boundingRect.height < image.size().height ? boundingRect.height : image.size().height - boundingRect.y;
cv::Point2f centerInRect = cv::Point2f(boundingRect.size().width, boundingRect.size().height) * 0.5;
cv::Mat1d localRotationM = getRotationMatrix2D(centerInRect, rotatedRect.angle, 1.0);
Mat rotatedBox, croppedImage;
warpAffine(image(boundingRect), rotatedBox, localRotationM, boundingRect.size());
getRectSubPix(rotatedBox, rotatedRect.size, centerInRect, croppedImage);
return croppedImage;
}
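// Maps a point expressed in the cropped, rotation-normalized patch back into original-image coordinates.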
cv::Point2f invertPointInRotatedRect(const cv::Point2f & point, const cv::RotatedRect &rotatedRect) {
cv::Point2f centerInRRect = cv::Point2f(rotatedRect.size.width, rotatedRect.size.height) * 0.5;
cv::Point2f pointCentered = point - centerInRRect;
Mat1d invertT = getRotationMatrix2D(cv::Point2f(0,0), -rotatedRect.angle, 1.0);
invertT(0,2) = (double)rotatedRect.center.x;
invertT(1,2) = (double)rotatedRect.center.y;
Mat2d resultMat;
cv::transform(cv::Mat2d(pointCentered), resultMat, invertT);
return cv::Point2f(resultMat(0)[0], resultMat(0)[1]);
}
Point rectBL(Rect rect) {
return Point(rect.x,rect.y+rect.height);
}
Point rectTR(Rect rect) {
return Point(rect.x+rect.width,rect.y);
}
Point rectBR(Rect rect) {
return Point(rect.x+rect.width,rect.y+rect.height);
}
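// Returns the largest centered square that fits inside the image.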
Rect findBiggestSquare(const Mat& original_img) {
int width = original_img.size().width;
int height =original_img.size().height;
int length = width > height ? height : width;
int x = width > height ? (width - length)/2 : 0;
int y = width > height ? 0 :(height - length)/2 ;
Rect squareRect(x,y,length,length);
return squareRect;
}
void fliplr(vector<Point2f>& points,Size imageSize) {
for (int i = 0 ; i < points.size(); i++) {
points[i].x = imageSize.width - points[i].x;
}
}
// It has been verified that the following two methods are mutually consistent:
/*
cv::Mat eulerAngles = getEulerAnglesFromRotationMatrix(attitudeFaceToNavi);
cout<<eulerAngles.at<double>(0)<<" "<<eulerAngles.at<double>(1)<<" "<<eulerAngles.at<double>(2)<<endl;
cv::Mat rmFromEulerAngle = getRotationMatrixFromEulerAngles(eulerAngles);
cv::Mat eulerAngleFromTransformedRM = getEulerAnglesFromRotationMatrix(rmFromEulerAngle);
cv::Mat angleGap = eulerAngles - eulerAngleFromTransformedRM;
// The "angleGap" is always smaller than 1e-16.
*/
// Converts a given Rotation Matrix to Euler angles
cv::Vec3d getEulerAnglesFromRotationMatrix(const cv::Matx33d & rotationMatrix)
{
cv::Vec3d euler;
double m00 = rotationMatrix(0,0);
double m10 = rotationMatrix(1,0);
double m11 = rotationMatrix(1,1);
double m12 = rotationMatrix(1,2);
double m20 = rotationMatrix(2,0);
double x, y, z;
// Assuming the angles are in radians.
// if (m10 > 0.998) { // singularity at north pole
// x = 0;
// y = CV_PI/2;
// z = atan2(m02,m22);
// }
// else if (m10 < -0.998) { // singularity at south pole
// x = 0;
// y = -CV_PI/2;
// z = atan2(m02,m22);
// }
// else
// {
x = atan2(-m12,m11);
y = asin(m10);
z = atan2(-m20,m00);
// }
euler(0) = x;
euler(1) = y;
euler(2) = z;
return euler;
}
// Converts given Euler angles to a rotation matrix
cv::Matx33d getRotationMatrixFromEulerAngles (const cv::Vec3d & euler)
{
cv::Matx33d rotationMatrix;
double x = euler(0);
double y = euler(1);
double z = euler(2);
// Assuming the angles are in radians.
double ch = cos(z);
double sh = sin(z);
double ca = cos(y);
double sa = sin(y);
double cb = cos(x);
double sb = sin(x);
double m00, m01, m02, m10, m11, m12, m20, m21, m22;
m00 = ch * ca;
m01 = sh*sb - ch*sa*cb;
m02 = ch*sa*sb + sh*cb;
m10 = sa;
m11 = ca*cb;
m12 = -ca*sb;
m20 = -sh*ca;
m21 = sh*sa*cb + ch*sb;
m22 = -sh*sa*sb + ch*cb;
rotationMatrix(0,0) = m00;
rotationMatrix(0,1) = m01;
rotationMatrix(0,2) = m02;
rotationMatrix(1,0) = m10;
rotationMatrix(1,1) = m11;
rotationMatrix(1,2) = m12;
rotationMatrix(2,0) = m20;
rotationMatrix(2,1) = m21;
rotationMatrix(2,2) = m22;
return rotationMatrix;
}
}
//
// CoreOpenCVExtensionsOSX.h
// CoreOpenCVExtensionsOSX
//
// Created by JiangZhping on 2016/9/29.
// Copyright © 2016 JiangZhping. All rights reserved.
//
#import <Cocoa/Cocoa.h>
//! Project version number for CoreOpenCVExtensionsOSX.
FOUNDATION_EXPORT double CoreOpenCVExtensionsOSXVersionNumber;
//! Project version string for CoreOpenCVExtensionsOSX.
FOUNDATION_EXPORT const unsigned char CoreOpenCVExtensionsOSXVersionString[];
// In this header, you should import all the public headers of your framework using statements like #import <CoreOpenCVExtensionsOSX/PublicHeader.h>
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>en</string>
<key>CFBundleExecutable</key>
<string>$(EXECUTABLE_NAME)</string>
<key>CFBundleIdentifier</key>
<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>$(PRODUCT_NAME)</string>
<key>CFBundlePackageType</key>
<string>FMWK</string>
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<key>CFBundleVersion</key>
<string>$(CURRENT_PROJECT_VERSION)</string>
<key>NSHumanReadableCopyright</key>
<string>Copyright © 2016 JiangZhping. All rights reserved.</string>
<key>NSPrincipalClass</key>
<string></string>
</dict>
</plist>
//
// PrefixHeader.pch
// GazeEstimationOnCocoa
//
// Created by JiangZhiping on 15/11/4.
// Copyright © 2015 JiangZhiping. All rights reserved.
//
#ifdef __cplusplus
#import <opencv2/opencv.hpp>
#endif
// Include any system framework and library headers here that should be included in all compilation units.
// You will also need to set the Prefix Header build setting of one or more of your targets to reference this file.
//
// jzplib_bwOps.h
// OPENCV_HOTSHOTS
//
// Created by Zhiping Jiang on 14-7-18.
//
//
#pragma once

#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
namespace cve {
Mat fillHoleInBinary(Mat bwImage);
Mat removeSmallBlobs(Mat bwImage);
Mat fillConvexHulls(Mat bwImage) ;
}
//
// jzplib_camera.h
// JZP_EYE_TRACKING
//
// Created by Zhiping Jiang on 14-8-23.
//
//
#pragma once

#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
namespace cve {
bool readCameraProfile(const std::string & filepath, Mat & cameraMatrix, Mat & distCoeffs);
Mat cameraMatrixByCropNResize(Mat originalCameraMatrix, cv::Size originalSize, cv::Rect currentRect, float resizeFactor);
bool captureImage(VideoCapture& capture, Mat& color_img, bool flip = false);
vector<vector<Point3f> > calcBoardCornerPositions(int gridW, int gridH, float squareSize, int imagesCount);
bool chessboardCameraCalibration(int gridW, int gridH, float gridSize, vector<std::string> imagePaths, Mat & cameraMatrix, Mat & distCoeffs, bool drawCorners);
}
//
// jzplib_draw.h
// OPENCV_HOTSHOTS
//
// Created by Zhiping Jiang on 14-7-18.
//
//
#import "CoreOpenCVExtensions.h"
namespace cve {
void drawBox(cv::Mat& image, CvRect box, cv::Scalar color = cvScalarAll(255), int thick=1);
void drawPoints(cv::Mat& image, vector<cv::Point2f> points, Scalar color=cv::Scalar::all(255));
void drawPoints(cv::Mat& image, vector<cv::Point2d> points, Scalar color=cv::Scalar::all(255));
void drawConnectedPoints(Mat& image, vector<Point2f> points, Scalar color=cv::Scalar::all(255), int thick=1);
void drawRotatedRect(Mat image, RotatedRect eyeRRect);
void drawStringAtPoint(Mat img, const string text, cv::Point position);
void drawStringAtTopLeftCorner(Mat img, const string text);
void drawColorHistGram(Mat hist);
void imagesc(string windowName, Mat image, int colormap = COLORMAP_JET) ;
}