前言
- 整理總結一下之前做的倒影相關操作,記錄講解一下整體思路
API & Property
@interface KJReflectionImageView : UIImageView
/// Designated entry point: creates the reflection view for the given vessel
/// (container) view. The optional block configures the chainable parameters
/// declared below before the reflection is built.
- (instancetype)kj_initWithVesselView:(KJInteriorVesselView*)vesselView ExtendParameterBlock:(void(^_Nullable)(KJReflectionImageView *obj))paramblock;
#pragma mark - ExtendParameterBlock 擴展參數
/// Chainable: adds the reflection view as a subview of the given view.
@property(nonatomic,strong,readonly) KJReflectionImageView *(^kAddView)(UIView*);
/// Chainable: reflect only the appointed content (murals, wallpaper, curtains).
@property(nonatomic,strong,readonly) KJReflectionImageView *(^kAppointContent)(bool);
/// Chainable: sharpness of the reflection (Gaussian blur amount).
@property(nonatomic,strong,readonly) KJReflectionImageView *(^kGaussianFuzzy)(CGFloat gaussian);
/// Chainable: the floor container view the reflection is cast onto.
@property(nonatomic,strong,readonly) KJReflectionImageView *(^kFloorVesselView)(KJInteriorVesselView*);
/// Chainable: perspective / rectification handler block.
@property(nonatomic,strong,readonly) KJReflectionImageView *(^kCorrectImageBlock)(kInteriorSizePerspectiveBlock);
@end
kj_initWithVesselView:ExtendParameterBlock:
:初始化方法,傳入需要倒影的容器
ExtendParameterBlock 擴展參數
kAddView
:添加到指定的父視圖上(鏈式調用)
kAppointContent
:倒影指定內容(壁畫、牆紙、窗簾)
kGaussianFuzzy
:清晰度(高斯模糊)
kFloorVesselView
:倒影主要作用於地板,此處傳入地板容器視圖。
kCorrectImageBlock
:透視和矯正處理
詳細介紹內部思路
1.矯正透視圖點處理
判斷水平方向和豎直方向哪邊更趨近於平行,從而得到當前需要倒影透視圖的透視四點和level
屬性,該屬性後面會使用到
/// Rectifies the four perspective points of the vessel quad.
/// Whichever direction (horizontal vs. vertical) is closer to parallel decides
/// how the corners are swapped; `level` records that orientation for the later
/// clip/rotation steps. Finally all points are converted from the superview's
/// coordinate space into the receiver's own.
- (KJKnownPoints)kj_correctImageWithknownPoints:(KJKnownPoints)tempPoints{
    // Horizontal spans of the AD / BC edges and vertical spans of AB / CD.
    CGFloat spanAD = fabs(tempPoints.PointA.x - tempPoints.PointD.x);
    CGFloat spanBC = fabs(tempPoints.PointB.x - tempPoints.PointC.x);
    CGFloat spanAB = fabs(tempPoints.PointA.y - tempPoints.PointB.y);
    CGFloat spanCD = fabs(tempPoints.PointC.y - tempPoints.PointD.y);
    if (fabs(spanAD - spanBC) > fabs(spanAB - spanCD)) {
        // Horizontal orientation: swap A<->B and C<->D.
        CGPoint swap = tempPoints.PointA;
        tempPoints.PointA = tempPoints.PointB;
        tempPoints.PointB = swap;
        swap = tempPoints.PointD;
        tempPoints.PointD = tempPoints.PointC;
        tempPoints.PointC = swap;
        self.level = true;
    } else {
        // Vertical orientation: swap A<->D and B<->C.
        CGPoint swap = tempPoints.PointA;
        tempPoints.PointA = tempPoints.PointD;
        tempPoints.PointD = swap;
        swap = tempPoints.PointB;
        tempPoints.PointB = tempPoints.PointC;
        tempPoints.PointC = swap;
        self.level = false;
    }
    // Re-express every corner in the receiver's coordinate space.
    tempPoints.PointA = [self.superview convertPoint:tempPoints.PointA toView:self];
    tempPoints.PointB = [self.superview convertPoint:tempPoints.PointB toView:self];
    tempPoints.PointC = [self.superview convertPoint:tempPoints.PointC toView:self];
    tempPoints.PointD = [self.superview convertPoint:tempPoints.PointD toView:self];
    return tempPoints;
}
2.獲取需要透視的圖片
根據appoint
屬性來確定哪些需要倒影的素材,複製View其實就是只複製上面展示的控件View,而不複製Layer相關的數據
/// Deep-copies a vessel view by archiving and unarchiving it with NSCoding.
/// Only view-hierarchy state that participates in NSCoding survives the round
/// trip — layer-side data is deliberately NOT copied (see article text above).
/// NOTE(review): archivedDataWithRootObject: and unarchiveObjectWithData: are
/// deprecated since iOS 12; the secure-coding replacements cannot be dropped
/// in here because UIView does not adopt NSSecureCoding — confirm a migration
/// strategy before updating.
- (KJInteriorVesselView*)kj_copyInteriorVesselView:(KJInteriorVesselView*)view{
NSData *tempArchive = [NSKeyedArchiver archivedDataWithRootObject:view];
return [NSKeyedUnarchiver unarchiveObjectWithData:tempArchive];
}
截圖
/// Snapshots a view's layer into a UIImage.
/// Fix: the plain UIGraphicsBeginImageContext renders at 1x scale and produces
/// blurry captures on Retina displays; the WithOptions variant with scale 0
/// renders at the device's screen scale. opaque == NO preserves transparency.
/// @param view The view to capture.
/// @return The rendered snapshot, or nil when no context could be created
///         (e.g. a zero-sized view).
- (UIImage*)kj_captureScreen:(UIView*)view{
    UIGraphicsBeginImageContextWithOptions(view.bounds.size, NO, 0);
    CGContextRef ctx = UIGraphicsGetCurrentContext();
    if (ctx == NULL) {
        // Zero-sized bounds: no bitmap context exists, bail out cleanly.
        UIGraphicsEndImageContext();
        return nil;
    }
    [view.layer renderInContext:ctx];
    UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return newImage;
}
3.矯正圖片
UIImage *correctImage = self.xxxblock(vesselView.frame.size,tempPoints,image);
4.裁剪正確圖片
將圖片的多餘部分裁剪出去。
/// Crops the rectified perspective image down to the vessel quad.
/// @param image The image to crop.
/// @param level Orientation chosen by kj_correctImageWithknownPoints: —
///              true means the horizontal direction was closer to parallel.
- (UIImage*)kj_clipImage:(UIImage*)image Level:(bool)level{
CGRect rect = CGRectZero;
KJKnownPoints pts = self.vesselView.knownPoints;
// Edge spans, measured in the superview's coordinate space (before convert).
CGFloat AB = fabs(pts.PointA.y - pts.PointB.y);
CGFloat CD = fabs(pts.PointC.y - pts.PointD.y);
CGFloat AD = fabs(pts.PointA.x - pts.PointD.x);
CGFloat CB = fabs(pts.PointC.x - pts.PointB.x);
// NOTE(review): w duplicates AD and h duplicates CD (identical expressions);
// presumably intentional aliases — confirm different point pairs were not meant.
CGFloat w = fabs(pts.PointA.x - pts.PointD.x);
CGFloat h = fabs(pts.PointC.y - pts.PointD.y);
// The rect origin below uses points converted into our own coordinate space,
// while the width/height use the spans computed above.
pts.PointA = [self.superview convertPoint:pts.PointA toView:self];
pts.PointB = [self.superview convertPoint:pts.PointB toView:self];
pts.PointC = [self.superview convertPoint:pts.PointC toView:self];
pts.PointD = [self.superview convertPoint:pts.PointD toView:self];
if (level) {
// Horizontal orientation: clip to the narrower of the AD / CB spans.
rect = CGRectMake(AD>CB?pts.PointB.x:pts.PointA.x,pts.PointA.y,AD>CB?CB:AD,h);
}else{
// Vertical orientation: clip to the shorter of the AB / CD spans.
rect = CGRectMake(pts.PointA.x,AB>CD?pts.PointD.y:pts.PointA.y,w,AB>CD?CD:AB);
}
UIImage *newImage = [self kj_cutImageWithImage:image Frame:rect];
// Vertical orientation additionally rotates 180° so the result is mirrored.
if (level == false) newImage = [self kj_rotationImageWithImage:newImage];
return newImage;
}
圖片旋轉180°
/// Returns the image rotated by 180°.
/// Works in the CGImage's pixel space (CGImageGetWidth/Height), so the plain
/// UIGraphicsBeginImageContext (1x scale) is intentional here — the drawing
/// rect is already expressed in pixels.
/// Fixes vs. original: removed a dead CGAffineTransformIdentity store that was
/// immediately overwritten, removed the redundant `bounds` copy of `rect`, and
/// reuse the fetched `context` instead of calling
/// UIGraphicsGetCurrentContext() a second time.
- (UIImage*)kj_rotationImageWithImage:(UIImage*)image{
    CGRect rect = CGRectZero;
    rect.size.width = CGImageGetWidth(image.CGImage);
    rect.size.height = CGImageGetHeight(image.CGImage);
    // Translate to the far corner, then rotate π around it: a 180° turn.
    CGAffineTransform transform = CGAffineTransformMakeTranslation(rect.size.width, rect.size.height);
    transform = CGAffineTransformRotate(transform, M_PI);
    UIGraphicsBeginImageContext(rect.size);
    CGContextRef context = UIGraphicsGetCurrentContext();
    // Flip vertically first: CGContextDrawImage uses a bottom-left origin.
    CGContextScaleCTM(context, 1.0, -1.0);
    CGContextTranslateCTM(context, 0.0, -rect.size.height);
    CGContextConcatCTM(context, transform);
    CGContextDrawImage(context, rect, image.CGImage);
    UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return newImage;
}
5.高斯模糊處理
這裏主要採用的是 Accelerate 框架裏面的模糊濾鏡處理,
clipImage = [clipImage kj_linearBlurryImageBlur:self.gaussian/kReflection_gaussian_max];
模糊函數如下:
/// Blurs the receiver with an Accelerate box-convolution filter.
/// @param blur Normalized blur strength in [0,1]; mapped to an odd box-kernel
///             size between 1 and 101 (vImage requires an odd kernel).
/// @return The blurred image, or the receiver unchanged if a scratch buffer
///         could not be allocated.
/// Fixes vs. original: the third buffer pair (outRGBBuffer/rgbConvertBuffer)
/// was allocated and freed but never used and has been removed; a failed
/// malloc previously only logged and then dereferenced NULL — now fails soft.
- (UIImage*)kj_linearBlurryImageBlur:(CGFloat)blur{
    // Clamp to [0,1] and derive an odd kernel size.
    blur = MAX(MIN(blur,1),0);
    int boxSize = (int)(blur * 100);
    boxSize = boxSize - (boxSize % 2) + 1;
    CGImageRef img = self.CGImage;
    size_t bufferLength = CGImageGetBytesPerRow(img) * CGImageGetHeight(img);
    // The source buffer aliases the CGImage's own bitmap bytes.
    CGDataProviderRef inProvider = CGImageGetDataProvider(img);
    CFDataRef inBitmapData = CGDataProviderCopyData(inProvider);
    vImage_Buffer inBuffer;
    inBuffer.width = CGImageGetWidth(img);
    inBuffer.height = CGImageGetHeight(img);
    inBuffer.rowBytes = CGImageGetBytesPerRow(img);
    inBuffer.data = (void *)CFDataGetBytePtr(inBitmapData);
    // Two scratch buffers: blurred output, then channel-permuted result.
    void *pixelBuffer = malloc(bufferLength);
    void *convertBuffer = malloc(bufferLength);
    if (pixelBuffer == NULL || convertBuffer == NULL) {
        // Out of memory: return the unblurred image instead of crashing.
        NSLog(@"No pixelbuffer");
        free(pixelBuffer);
        free(convertBuffer);
        if (inBitmapData) CFRelease(inBitmapData);
        return self;
    }
    vImage_Buffer outBuffer;
    outBuffer.data = pixelBuffer;
    outBuffer.width = CGImageGetWidth(img);
    outBuffer.height = CGImageGetHeight(img);
    outBuffer.rowBytes = CGImageGetBytesPerRow(img);
    vImage_Buffer rgbOutBuffer;
    rgbOutBuffer.data = convertBuffer;
    rgbOutBuffer.width = CGImageGetWidth(img);
    rgbOutBuffer.height = CGImageGetHeight(img);
    rgbOutBuffer.rowBytes = CGImageGetBytesPerRow(img);
    /// Box filter (blur filter).
    vImage_Error error = vImageBoxConvolve_ARGB8888(&inBuffer,&outBuffer,NULL,0,0,boxSize,boxSize,NULL,kvImageEdgeExtend);
    if (error) NSLog(@"error from convolution %ld", error);
    /// Swap the pixel channels from BGRA to RGBA.
    const uint8_t permuteMap[] = {2, 1, 0, 3};
    vImagePermuteChannels_ARGB8888(&outBuffer,&rgbOutBuffer,permuteMap,kvImageNoFlags);
    /// kCGImageAlphaPremultipliedLast keeps transparent regions intact.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef ctx = CGBitmapContextCreate(rgbOutBuffer.data,
                                             rgbOutBuffer.width,
                                             rgbOutBuffer.height,
                                             8,
                                             rgbOutBuffer.rowBytes,
                                             colorSpace,
                                             kCGImageAlphaPremultipliedLast);
    CGImageRef imageRef = CGBitmapContextCreateImage(ctx);
    UIImage *returnImage = [UIImage imageWithCGImage:imageRef];
    // Release everything we created or copied.
    CGContextRelease(ctx);
    free(pixelBuffer);
    free(convertBuffer);
    CFRelease(inBitmapData);
    CGColorSpaceRelease(colorSpace);
    CGImageRelease(imageRef);
    return returnImage;
}
6.尋找新的透視四點
6.1 找到倒影容器的中點與地板四點形成的線的交點
/// Given quad ABCD and point E, intersects the vertical line through E with
/// each of the four edges; a coordinate of HUGE_VAL marks "no intersection".
static inline KJIntersectPoints kj_dotIntersections(CGPoint A,CGPoint B,CGPoint C,CGPoint D,CGPoint E){
    CGPoint none = CGPointMake(HUGE_VAL, HUGE_VAL);
    KJIntersectPoints result = {none, none, none, none};
    // A vertical edge (equal x endpoints) cannot cross another vertical line,
    // so those edges keep the "no intersection" marker.
    if (A.x != B.x) result.AB = kj_intersectionDot(A, B, E);
    if (B.x != C.x) result.BC = kj_intersectionDot(B, C, E);
    if (C.x != D.x) result.CD = kj_intersectionDot(C, D, E);
    if (D.x != A.x) result.DA = kj_intersectionDot(D, A, E);
    return result;
}
6.2 判斷哪些點在線段之間
/// Returns true when `point` lies within segment AB's horizontal extent.
/// Only the x-coordinate is range-checked: callers pass a point that is
/// already known to lie on the (infinite) line through A and B.
static inline bool kj_dotIsLine(CGPoint point,CGPoint A,CGPoint B){
    CGFloat lower = MIN(A.x, B.x);
    CGFloat upper = MAX(A.x, B.x);
    return lower <= point.x && point.x <= upper;
}
6.3 確定翻轉軸,倒影的翻轉中心點
/// Determines the flip axis for the reflection: of the quad's four edges, pick
/// the one whose intersection with the vertical line through E lies on the
/// edge and is vertically closest to E. Falls back to edge AB when no edge
/// yields a usable intersection.
/// Fix vs. original: the NSNumber-based version used @(0) both as the
/// "no intersection on this edge" marker and as a genuine zero distance, so an
/// axis passing exactly through E was wrongly skipped; -1 now marks invalid.
static inline KJTurnLine kj_confirmTurnLine(CGPoint A,CGPoint B,CGPoint C,CGPoint D,CGPoint E){
    KJIntersectPoints intersects = kj_dotIntersections(A, B, C, D, E);
    // Vertical distance from E to each edge's intersection; -1 = not on edge.
    CGFloat distances[4];
    distances[0] = kj_dotIsLine(intersects.AB, A, B) ? fabs(intersects.AB.y - E.y) : -1;
    distances[1] = kj_dotIsLine(intersects.BC, B, C) ? fabs(intersects.BC.y - E.y) : -1;
    distances[2] = kj_dotIsLine(intersects.CD, C, D) ? fabs(intersects.CD.y - E.y) : -1;
    distances[3] = kj_dotIsLine(intersects.DA, D, A) ? fabs(intersects.DA.y - E.y) : -1;
    int idx = 0;
    CGFloat min = INFINITY;
    for (int i = 0; i < 4; i++) {
        if (distances[i] < 0) continue; // no usable intersection on this edge
        if (distances[i] < min) {
            min = distances[i];
            idx = i;
        }
    }
    switch (idx) {
        case 0:  return (KJTurnLine){A, B};
        case 1:  return (KJTurnLine){B, C};
        case 2:  return (KJTurnLine){C, D};
        default: return (KJTurnLine){D, A};
    }
}
6.4 找到豎直的線交點,因爲倒影都是豎直向下
/// Intersection of the line through A and B with the vertical line through C.
/// When AB itself is vertical (A.x == B.x) the intersection is ill-defined;
/// the slope falls back to 1 — callers guard against that case beforehand.
static inline CGPoint kj_intersectionDot(CGPoint A,CGPoint B,CGPoint C){
    CGFloat slope = (A.x == B.x) ? 1 : (A.y - B.y) / (A.x - B.x);
    CGFloat intercept = A.y - slope * A.x; // line equation: y = slope*x + intercept
    return CGPointMake(C.x, slope * C.x + intercept);
}
6.5 找到倒影視圖的透視點
/// Mirrors `point` vertically across the turn line: drop a vertical onto the
/// line, then continue the same distance past it (downwards in screen
/// coordinates, which is where the reflection lives).
- (CGPoint)kj_point:(CGPoint)point Line:(KJTurnLine)line{
    CGPoint foot = kj_intersectionDot(line.point1, line.point2, point);
    return CGPointMake(foot.x, foot.y + fabs(foot.y - point.y));
}
經過上面一系列的數學運算,就得到了倒影的透視四點
7.透視處理獲得透視圖
self.image = self.xxxblock(self.frame.size,newPoints,clipImage);
8.裁剪超出地板區域部分
/// Clip anything that spills outside the floor region: mask the reflection
/// layer with the floor view's outline path. The mask frame is offset by the
/// negative origin so the path — presumably expressed in the superview's
/// coordinates (confirm against KJInteriorVesselView.outsidePath) — lines up
/// with this view's own layer.
CAShapeLayer *maskLayer = [[CAShapeLayer alloc] init];
maskLayer.frame = CGRectMake(-self.frame.origin.x, -self.frame.origin.y, self.frame.size.width, self.frame.size.height);
maskLayer.path = self.floorView.outsidePath.CGPath;
self.layer.mask = maskLayer;
- 到此,其實倒影的核心思路就差不多都出來了。
補充說明
/// Returns every point spaced `space` apart along segment AB, encoded with
/// NSStringFromCGPoint; B itself is appended when the stepped points do not
/// land on it exactly.
/// Fix vs. original: the slope/intercept stepping walked horizontal segments
/// in the +x direction regardless of where B lay, and forced slope = 1 for
/// vertical segments (producing points off the segment). Parametric
/// interpolation A + t*(B-A) is algebraically identical for the general case
/// and handles both degenerate orientations correctly.
static inline NSArray * kj_findLineDots(CGPoint A,CGPoint B,CGFloat space){
    NSMutableArray *dots = [NSMutableArray array];
    CGFloat length = sqrt(pow(A.x - B.x, 2) + pow(A.y - B.y, 2));
    NSInteger count = length / space;
    CGPoint pt = CGPointZero;
    for (int i = 0; i <= count; i++) {
        // t is the normalized distance travelled from A towards B.
        CGFloat t = (length == 0) ? 0 : (space * i) / length;
        pt = CGPointMake(A.x + t * (B.x - A.x), A.y + t * (B.y - A.y));
        [dots addObject:NSStringFromCGPoint(pt)];
    }
    if (CGPointEqualToPoint(pt, B) == false) {
        [dots addObject:NSStringFromCGPoint(B)];
    }
    return dots.mutableCopy;
}
找到最近的點
/// Greedy walk towards the nearest point: scan `points` in order; the first
/// candidate strictly closer than `maxLength` (and not coincident) becomes the
/// new origin and the search recurses with the shrunken radius and the
/// remaining candidates; points farther than the radius are pruned as we go.
/// Returns the current origin once no closer candidate exists.
static inline CGPoint kj_findShortestPoint(CGPoint origin,NSArray *points,CGFloat maxLength){
    NSMutableArray *remaining = [NSMutableArray arrayWithArray:points];
    for (NSString *encoded in points) {
        CGPoint candidate = CGPointFromString(encoded);
        CGFloat distance = sqrt(pow(origin.x - candidate.x, 2) + pow(origin.y - candidate.y, 2));
        if (distance != 0 && distance < maxLength) {
            return kj_findShortestPoint(candidate, remaining.mutableCopy, distance);
        } else if (distance > maxLength) {
            [remaining removeObject:encoded];
        }
    }
    remaining = nil;
    return origin;
}
擴展參數
#pragma mark - ExtendParameterBlock 擴展參數
/// Chainable: installs the reflection view into the given parent view.
- (KJReflectionImageView *(^)(UIView*))kAddView {
    return ^KJReflectionImageView *(UIView *parent) {
        [parent addSubview:self];
        return self;
    };
}
/// Chainable: whether only the appointed content (murals, wallpaper,
/// curtains) should be reflected.
- (KJReflectionImageView *(^)(bool))kAppointContent {
    return ^KJReflectionImageView *(bool appoint) {
        self.appoint = appoint;
        return self;
    };
}
/// Chainable: sharpness of the reflection (Gaussian blur amount).
- (KJReflectionImageView *(^)(CGFloat))kGaussianFuzzy {
    return ^KJReflectionImageView *(CGFloat amount) {
        self.gaussian = amount;
        return self;
    };
}
/// Chainable: the floor container the reflection is cast onto.
- (KJReflectionImageView *(^)(KJInteriorVesselView*))kFloorVesselView {
    return ^KJReflectionImageView *(KJInteriorVesselView *floorVessel) {
        self.floorView = floorVessel;
        return self;
    };
}
/// Chainable: perspective / rectification handler used by the warp steps.
- (KJReflectionImageView *(^)(kInteriorSizePerspectiveBlock))kCorrectImageBlock {
    return ^KJReflectionImageView *(kInteriorSizePerspectiveBlock handler) {
        self.xxxblock = handler;
        return self;
    };
}
附上一張效果圖:
備註:這裏其實還有小毛病就是如果透視點中間的碎點太多的話,計算點的方式就不夠準確,會導致尋找的透視四點出現偏差,歡迎大神們來指導聊聊,謝謝!!