#swift #filter #webrtc
#быстрый #Фильтр #webrtc
Вопрос:
я хочу добавить некоторый эффект фильтра в localVideo, поэтому я изменил CMSampleBuffer:
- Преобразовать в UIImage
- Использование VNFaceDetector для обнаружения границы лица
- Добавьте мое изображение фильтра в изображение камеры
- Преобразуйте обратно в CMSampleBuffer
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) { guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return } let ciimage = CIImage(cvPixelBuffer: imageBuffer) let image = convert(cmage: ciimage) print("image: (image.size)") let faceDetectionRequest = VNDetectFaceLandmarksRequest(completionHandler: { (request: VNRequest, error: Error?) in DispatchQueue.main.async {[weak self] in if let observations = request.results as? [VNFaceObservation], !observations.isEmpty { for observation in observations { let box = observation.boundingBox let boxFrame = CGRect(x: box.origin.x * image.size.width, y: box.origin.y * image.size.height, width: box.width * image.size.width, height: box.height * image.size.height) print("box: (boxFrame)") let logo = UIImage(named: "dog_nose")! if let newImage = self?.drawImageIn(image, logo, inRect: boxFrame){ if let pxBuffer = self?.convertImageToBuffer(from: newImage){ var newSampleBuffer: CMSampleBuffer? = nil var timimgInfo: CMSampleTimingInfo = .invalid var videoInfo: CMVideoFormatDescription? = nil CMVideoFormatDescriptionCreateForImageBuffer(allocator: nil, imageBuffer: pxBuffer, formatDescriptionOut: amp;videoInfo) if videoInfo != nil{ CMSampleBufferCreateForImageBuffer(allocator: kCFAllocatorDefault, imageBuffer: pxBuffer, dataReady: true, makeDataReadyCallback: nil, refcon: nil, formatDescription: videoInfo!, sampleTiming: amp;timimgInfo, sampleBufferOut: amp;newSampleBuffer) if newSampleBuffer != nil{ self?.outputCaptureDelegate?.captureOutput!(output, didOutput: newSampleBuffer!, from: connection) return } } } } } } self?.outputCaptureDelegate?.captureOutput!(output, didOutput: sampleBuffer, from: connection) } }) let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: imageBuffer, orientation: .up, options: [:]) do { try imageRequestHandler.perform([faceDetectionRequest]) } catch { print(error.localizedDescription) } }
func convert(cmage: CIImage) -gt; UIImage { let context = CIContext(options: nil) let cgImage = context.createCGImage(cmage, from: cmage.extent)! let image = UIImage(cgImage: cgImage) return image } func drawImageIn(_ image: UIImage, _ logo: UIImage, inRect: CGRect) -gt; UIImage { let renderer = UIGraphicsImageRenderer(size: image.size) return renderer.image { context in image.draw(in: CGRect(origin: CGPoint.zero, size: image.size)) logo.draw(in: inRect) } } func convertImageToBuffer(from image: UIImage) -gt; CVPixelBuffer? { let attrs = [ String(kCVPixelBufferCGImageCompatibilityKey) : true, String(kCVPixelBufferCGBitmapContextCompatibilityKey) : true ] as [String : Any] var buffer : CVPixelBuffer? let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(image.size.width), Int(image.size.height), kCVPixelFormatType_32ARGB, attrs as CFDictionary, amp;buffer) guard (status == kCVReturnSuccess) else { return nil } CVPixelBufferLockBaseAddress(buffer!, CVPixelBufferLockFlags(rawValue: 0)) let pixelData = CVPixelBufferGetBaseAddress(buffer!) let rgbColorSpace = CGColorSpaceCreateDeviceRGB() let context = CGContext(data: pixelData, width: Int(image.size.width), height: Int(image.size.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(buffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue) context?.translateBy(x: 0, y: image.size.height) context?.scaleBy(x: 1.0, y: -1.0) UIGraphicsPushContext(context!) image.draw(in: CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height)) UIGraphicsPopContext() CVPixelBufferUnlockBaseAddress(buffer!, CVPixelBufferLockFlags(rawValue: 0)) return buffer }
but the problem is video is more slow and the image if wrong orientation, did someone faced this?