我想在localVideo中添加一些滤镜效果,所以我修改了CMSampleBuffer:
- 转换为 UIImage
- 使用VNFaceDetector检测人脸boundingBox
- 将我的滤镜图像添加到相机图像中
- 转换回 CMSampleBuffer
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return
}
let ciimage = CIImage(cvPixelBuffer: imageBuffer)
let image = convert(cmage: ciimage)
print("image: \(image.size)")
let faceDetectionRequest = VNDetectFaceLandmarksRequest(completionHandler: { (request: VNRequest, error: Error?) in
DispatchQueue.main.async {[weak self] in
if let observations = request.results as? [VNFaceObservation], !observations.isEmpty {
for observation in observations {
let box = observation.boundingBox
let boxFrame = CGRect(x: box.origin.x * image.size.width, y: box.origin.y * image.size.height, width: box.width * image.size.width, height: box.height * image.size.height)
print("box: \(boxFrame)")
let logo = UIImage(named: "dog_nose")!
if let newImage = self?.drawImageIn(image, logo, inRect: boxFrame){
if let pxBuffer = self?.convertImageToBuffer(from: newImage){
var newSampleBuffer: CMSampleBuffer? = nil
var timimgInfo: CMSampleTimingInfo = .invalid
var videoInfo: CMVideoFormatDescription? = nil
CMVideoFormatDescriptionCreateForImageBuffer(allocator: nil, imageBuffer: pxBuffer, formatDescriptionOut: &videoInfo)
if videoInfo != nil{
CMSampleBufferCreateForImageBuffer(allocator: kCFAllocatorDefault, imageBuffer: pxBuffer, dataReady: true, makeDataReadyCallback: nil, refcon: nil, formatDescription: videoInfo!, sampleTiming: &timimgInfo, sampleBufferOut: &newSampleBuffer)
if newSampleBuffer != nil{
self?.outputCaptureDelegate?.captureOutput!(output, didOutput: newSampleBuffer!, from: connection)
return
}
}
}
}
}
}
self?.outputCaptureDelegate?.captureOutput!(output, didOutput: sampleBuffer, from: connection)
}
})
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: imageBuffer, orientation: .up, options: [:])
do {
try imageRequestHandler.perform([faceDetectionRequest])
} catch {
print(error.localizedDescription)
}
}
func convert(cmage: CIImage) -> UIImage {
let context = CIContext(options: nil)
let cgImage = context.createCGImage(cmage, from: cmage.extent)!
let image = UIImage(cgImage: cgImage)
return image
}
func drawImageIn(_ image: UIImage, _ logo: UIImage, inRect: CGRect) -> UIImage {
let renderer = UIGraphicsImageRenderer(size: image.size)
return renderer.image { context in
image.draw(in: CGRect(origin: CGPoint.zero, size: image.size))
logo.draw(in: inRect)
}
}
func convertImageToBuffer(from image: UIImage) -> CVPixelBuffer? {
let attrs = [
String(kCVPixelBufferCGImageCompatibilityKey) : true,
String(kCVPixelBufferCGBitmapContextCompatibilityKey) : true
] as [String : Any]
var buffer : CVPixelBuffer?
let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(image.size.width), Int(image.size.height), kCVPixelFormatType_32ARGB, attrs as CFDictionary, &buffer)
guard (status == kCVReturnSuccess) else {
return nil
}
CVPixelBufferLockBaseAddress(buffer!, CVPixelBufferLockFlags(rawValue: 0))
let pixelData = CVPixelBufferGetBaseAddress(buffer!)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: pixelData, width: Int(image.size.width), height: Int(image.size.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(buffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)
context?.translateBy(x: 0, y: image.size.height)
context?.scaleBy(x: 1.0, y: -1.0)
UIGraphicsPushContext(context!)
image.draw(in: CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height))
UIGraphicsPopContext()
CVPixelBufferUnlockBaseAddress(buffer!, CVPixelBufferLockFlags(rawValue: 0))
return buffer
}
但问题是视频速度更慢并且图像方向错误,有人遇到过这个吗?