I have follow this instructions to achieve video (small) over video (big).
Getting assets, create composition... like this
...
let videoComposition = AVMutableVideoComposition()
videoComposition.customVideoCompositorClass = CustomComposition.self
...
and the main problem is when need to CGContext to draw cgimage:
class CustomComposition: NSObject, AVVideoCompositing {
var requiredPixelBufferAttributesForRenderContext: [String : Any] = [
kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32BGRA),
kCVPixelBufferOpenGLESCompatibilityKey as String : NSNumber(value: true),
kCVPixelBufferOpenGLCompatibilityKey as String : NSNumber(value: true)
]
var sourcePixelBufferAttributes: [String : Any]? = [
kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32BGRA),
kCVPixelBufferOpenGLESCompatibilityKey as String : NSNumber(value: true),
kCVPixelBufferOpenGLCompatibilityKey as String : NSNumber(value: true)
]
override init() {
super.init()
}
func startRequest(_ request: AVAsynchronousVideoCompositionRequest) {
guard let destination: CVPixelBuffer = request.renderContext.newPixelBuffer() else { return request.finish(with: NSError(domain: "", code: 3, userInfo: nil)) }
if request.sourceTrackIDs.count == 2 {
guard let front = request.sourceFrame(byTrackID: 2) else { return request.finish(with: NSError(domain: "", code: 1, userInfo: nil)) }
guard let back = request.sourceFrame(byTrackID: 1) else { return request.finish(with: NSError(domain: "", code: 2, userInfo: nil)) }
CVPixelBufferLockBaseAddress(front, .readOnly)
CVPixelBufferLockBaseAddress(back, .readOnly)
CVPixelBufferLockBaseAddress(destination, [])
renderFrontBuffer(front, back: back, to: destination)
CVPixelBufferUnlockBaseAddress(destination, [])
CVPixelBufferUnlockBaseAddress(back, .readOnly)
CVPixelBufferUnlockBaseAddress(front, .readOnly)
}
request.finish(withComposedVideoFrame: destination)
CVBufferRemoveAllAttachments(destination)
}
func renderFrontBuffer(_ front: CVPixelBuffer, back: CVPixelBuffer, to destination: CVPixelBuffer) {
var gc: CGContext? = nil
guard let frontImage: CGImage = createSourceImage(from: front) else { return }
guard let backImage: CGImage = createSourceImage(from: back) else { return }
let width: Int = CVPixelBufferGetWidth(destination) //Int(Config.renderSize.width)
let height: Int = CVPixelBufferGetHeight(destination)
let frame = CGRect(x: 0, y: 0, width: CGFloat(width), height: CGFloat(height))
let colorSpace = backImage.colorSpace!
gc = CGContext(data: CVPixelBufferGetBaseAddress(destination), width: width, height: height, bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(destination), space: colorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)
// MARK: - Place Back (big)
//gc?.draw(backImage, in: frame) // <- Problem: EXC_BAD_ACCESS
gc?.beginPath()
gc?.addRect(CGRect.init(x: 24, y: 64, width: 67, height: 120))
gc?.setFillColor(UIColor.yellow.cgColor)
gc?.fillPath()
// MARK: - Place Front (small)
//gc?.draw(frontImage, in: frame) // <- Problem: EXC_BAD_ACCESS
}
func createSourceImage(from buffer: CVPixelBuffer) -> CGImage? {
let width: Int = CVPixelBufferGetWidth(buffer)
let height: Int = CVPixelBufferGetHeight(buffer)
let stride: Int = CVPixelBufferGetBytesPerRow(buffer)
var data = CVPixelBufferGetBaseAddress(buffer)
let rgb = CGColorSpaceCreateDeviceRGB()
let provider = CGDataProvider(dataInfo: nil, data: &data, size: height * stride) { (_, _, _) in }
var image: CGImage? = nil
let last = CGBitmapInfo(rawValue: CGBitmapInfo.byteOrder32Big.rawValue | CGImageAlphaInfo.premultipliedLast.rawValue)
if let provider = provider {
image = CGImage(width: width, height: height, bitsPerComponent: 8, bitsPerPixel: 32, bytesPerRow: stride, space: rgb, bitmapInfo: last, provider: provider, decode: nil, shouldInterpolate: false, intent: .defaultIntent)
}
return image
}
func renderContextChanged(_ newRenderContext: AVVideoCompositionRenderContext) { }
}
When the .draw is not called, render something like this and have audio of both videos (that's ok), otherwise when is called get immediately EXC_BAD_ACCESS
error.
I already try to debug with Zombie Object, but nothing.
I also try to save png of cgimage and the error is the same
...
let uimg = UIImage(cgImage: frontImage)
let data = uimg.pngData() <- EXC_BAD_ACCESS
...
My guess is that I missing something in createSourceImage
with CGDataProvider
, or something with requiredPixelBufferAttributesForRenderContext
or sourcePixelBufferAttributes
arrays.
Or if someone have other idea how to achieve this (video over video), thanks.
Have a look at this, you can try this out!
https://i.sstatic.net/jFJil.jpg
class MyCustomComposition: NSObject, AVVideoCompositing {
var sourcePixelBufferAttributes: [String : Any]? {
get {
return ["\(kCVPixelBufferPixelFormatTypeKey)": kCVPixelFormatType_32BGRA]
}
}
var requiredPixelBufferAttributesForRenderContext: [String : Any] {
get {
return ["\(kCVPixelBufferPixelFormatTypeKey)": kCVPixelFormatType_32BGRA]
}
}
func renderContextChanged(_ newRenderContext: AVVideoCompositionRenderContext) {
}
func startRequest(_ asyncVideoCompositionRequest: AVAsynchronousVideoCompositionRequest) {
let request = asyncVideoCompositionRequest
//Destination Buffer
let destination = request.renderContext.newPixelBuffer()
//Grab Frames
//My Video track id is 1 , 2 for back and front videos. Please Check your 'trackID' using 'request.sourceTrackIDs' then use it
if request.sourceTrackIDs.count != 2 { return }
guard let front = request.sourceFrame(byTrackID: 2) else { return request.finish(with: NSError(domain: "", code: 1, userInfo: nil)) }
guard let back = request.sourceFrame(byTrackID: 1) else { return request.finish(with: NSError(domain: "", code: 2, userInfo: nil)) }
CVPixelBufferLockBaseAddress(front, CVPixelBufferLockFlags.readOnly)
CVPixelBufferLockBaseAddress(back, CVPixelBufferLockFlags.readOnly)
CVPixelBufferLockBaseAddress(destination!, CVPixelBufferLockFlags.readOnly)
renderFromBuffer(destination: destination!, front: front, back: back)
CVPixelBufferUnlockBaseAddress(destination!, CVPixelBufferLockFlags.readOnly)
CVPixelBufferUnlockBaseAddress(back, CVPixelBufferLockFlags.readOnly)
CVPixelBufferUnlockBaseAddress(front, CVPixelBufferLockFlags.readOnly)
request.finish(withComposedVideoFrame: destination!)
}
private func renderFromBuffer(destination: CVPixelBuffer, front: CVPixelBuffer, back: CVPixelBuffer) {
let width = CVPixelBufferGetWidth(destination)
let height = CVPixelBufferGetHeight(destination)
let newContext = CGContext(data: CVPixelBufferGetBaseAddress(destination), width: width, height: height, bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(destination), space: CGColorSpaceCreateDeviceRGB(), bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)
let frontImage = createSourceImageFromReferance(buffer: front)
let backImage = createSourceImageFromReferance(buffer: back)
//DRAW 'BACK' IMAGE
newContext?.saveGState()
newContext?.translateBy(x: 0, y: CGFloat(height))
newContext?.scaleBy(x: 1.0, y: -1.0)
let backImagerect = CGRect(x: 0, y: 0, width: width, height: height)
newContext?.draw(backImage, in: backImagerect)
newContext?.scaleBy(x: 1.0, y: -1.0)
newContext?.translateBy(x: 0, y: -CGFloat(height / 2))
newContext?.restoreGState()
//DRAW 'FRONT' IMAGE AT CENTER
newContext?.saveGState()
newContext?.translateBy(x: 0, y: CGFloat(height))
newContext?.scaleBy(x: 1.0, y: -1.0)
let frontImageSize = CGSize(width: frontImage.width , height: frontImage.height)
let centerPoint = CGPoint(x: CGFloat(width / 2) - (frontImageSize.width / 2), y: CGFloat(height / 2) - (frontImageSize.height / 2))
let frontImagerect = CGRect(origin: centerPoint, size: frontImageSize)
//Clip Front Video using BezierPath
let bezierPath = UIBezierPath(roundedRect: frontImagerect, cornerRadius: 50)
newContext?.addPath(bezierPath.cgPath)
newContext?.clip()
newContext?.draw(frontImage, in: frontImagerect)
newContext?.scaleBy(x: 1.0, y: -1.0)
newContext?.translateBy(x: 0, y: -CGFloat(height / 2))
newContext?.restoreGState()
newContext?.flush()
}
private func createSourceImageFromReferance(buffer: CVPixelBuffer) -> CGImage {
let width = CVPixelBufferGetWidth(buffer)
let height = CVPixelBufferGetHeight(buffer)
let stride = CVPixelBufferGetBytesPerRow(buffer)
let data = CVPixelBufferGetBaseAddress(buffer)
let rgb = CGColorSpaceCreateDeviceRGB()
let releaseMaskImagePixelData: CGDataProviderReleaseDataCallback = { (info: UnsafeMutableRawPointer?, data: UnsafeRawPointer, size: Int) -> () in
// https://developer.apple.com/reference/coregraphics/cgdataproviderreleasedatacallback
return
}
let provider:CGDataProvider? = CGDataProvider(dataInfo: nil, data: data!, size: height * stride, releaseData: releaseMaskImagePixelData)
let last = CGBitmapInfo(rawValue: CGBitmapInfo.byteOrder32Big.rawValue | CGImageAlphaInfo.premultipliedLast.rawValue)
var image = CGImage(width: width, height: height, bitsPerComponent: 8, bitsPerPixel: 32, bytesPerRow: stride, space: rgb, bitmapInfo: last, provider: provider!, decode: nil, shouldInterpolate: false, intent: .defaultIntent)
var rect = CGRect(x: 0, y: 0, width: Int(width), height: Int(height))
let newContext = CGContext(data: nil, width: width, height: height, bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(buffer), space: CGColorSpaceCreateDeviceRGB(), bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)
newContext?.saveGState()
newContext?.translateBy(x: 0, y: CGFloat(height))
newContext?.scaleBy(x: 1.0, y: -1.0)
newContext?.draw(image!, in: rect)
newContext?.restoreGState()
let im = (newContext!.makeImage())!
newContext?.flush()
return im
}
}
Create the composition for above class as below :
let mainInstructionCompostion = AVMutableVideoComposition()
mainInstructionCompostion.frameDuration = CMTime(value: 1, timescale: 30)
mainInstructionCompostion.renderSize = videoInfo.videoSize
mainInstructionCompostion.instructions = [mainInstruction]
mainInstructionCompostion.customVideoCompositorClass = MyCustomComposition.self