Search code examples
iosswiftswift4.2

Problem converting array of images to video


So I am trying to convert a quite large array of images, of around 30 images to a video in this app I am working on.

My code to do the conversion is based on this code that I saw on this question: Making video from UIImage array with different transition animations

A problem that I saw that is quite frequent in the attempts to do this type of conversion is to have some kind of problem with the output of AVAssetsWriter, but that seems to be ok for me.

I don't know if this can be the problem but if I check pixelBufferPool is null before I start videoWriter it says it is null, but after it starts it says is not null.

This is my code to make the conversion:

var outputSize = CGSize(width: 1920, height: 1280)
    let imagesPerSecond: TimeInterval = 0.3 //each image will be stay for 3 secs
    var selectedPhotosArray = [UIImage()] 
    let audioIsEnabled: Bool = false //if your video has no sound
    var asset: AVAsset!
    var videoCriado : Bool = false
    var publicId : String?   
    var videoPlayer : AVPlayer?
    func buildVideoFromImageArray(imageArrayToVideoURL: URL, completion: @escaping (AVPlayer) -> ())  {

        removeFileAtURLIfExists(url: imageArrayToVideoURL as NSURL)
        guard let videoWriter = try? AVAssetWriter(outputURL: imageArrayToVideoURL as URL, fileType: AVFileType.mp4) else {
            fatalError("AVVideoCodecType.h264 error")
        }
        let outputSettings = [AVVideoCodecKey : AVVideoCodecType.h264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width)), AVVideoHeightKey : NSNumber(value: Float(outputSize.height))] as [String : Any]
        guard videoWriter.canApply(outputSettings: outputSettings, forMediaType: AVMediaType.video) else {
            fatalError("Negative : Can't apply the Output settings...")
        }
        let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: outputSettings)
        print(videoWriter.status.rawValue)
        print(videoWriter.outputURL)

        let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(value: Float(outputSize.width)), kCVPixelBufferHeightKey as String: NSNumber(value: Float(outputSize.height))]
        let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)

        if videoWriter.canAdd(videoWriterInput) {
            videoWriter.add(videoWriterInput)
        }
        if videoWriter.startWriting() {
            print(videoWriter.status.rawValue)
            let zeroTime = CMTimeMake(value: Int64(imagesPerSecond),timescale: Int32(1))
            videoWriter.startSession(atSourceTime: zeroTime)

            assert(pixelBufferAdaptor.pixelBufferPool != nil)
            let media_queue = DispatchQueue(label: "mediaInputQueue")
            videoWriterInput.requestMediaDataWhenReady(on: media_queue, using: { () -> Void in
                let fps: Int32 = 1
                let framePerSecond: Int64 = Int64(self.imagesPerSecond)
                let frameDuration = CMTimeMake(value: Int64(self.imagesPerSecond), timescale: fps)
                var frameCount: Int64 = 0
                var appendSucceeded = true
                while (!self.selectedPhotosArray.isEmpty) {
                    if (videoWriterInput.isReadyForMoreMediaData) {
                        let nextPhoto = self.selectedPhotosArray.remove(at: 0)
                        let lastFrameTime = CMTimeMake(value: frameCount * framePerSecond, timescale: fps)
                        let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
                        var pixelBuffer: CVPixelBuffer? = nil
                        let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
                        if let pixelBuffer = pixelBuffer, status == 0 {
                            let managedPixelBuffer = pixelBuffer
                            CVPixelBufferLockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
                            let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
                            let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
                            let context = CGContext(data: data, width: Int(self.outputSize.width), height: Int(self.outputSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(managedPixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
                            context!.clear(CGRect(x: 0, y: 0, width: CGFloat(self.outputSize.width), height: CGFloat(self.outputSize.height)))
                            let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
                            let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
                            //let aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
                            let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
                            let newSize: CGSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
                            let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
                            let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
                            context?.draw(nextPhoto.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
                            CVPixelBufferUnlockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
                            appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
                        } else {
                            print("Failed to allocate pixel buffer")
                            appendSucceeded = false
                        }
                    }
                    if !appendSucceeded {
                        break
                    }
                    frameCount += 1
                }
                videoWriterInput.markAsFinished()
                videoWriter.finishWriting { () -> Void in
                    print("-----video1 url = \(imageArrayToVideoURL)")

                    self.asset = AVAsset(url: imageArrayToVideoURL)
                    self.videoPlayer = AVPlayer(url: imageArrayToVideoURL)

                    //self.videoCriado = true
                    //self.resultUrl = self.exportVideoWithAnimation()
                    completion(self.videoPlayer!)
                    //self.exportVideoWithAnimation()
                }
            })
        }
        //return asset
    }

And this is how I am calling the function:

let documentsURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!
            let fileURL = documentsURL.appendingPathComponent("\(public_id!.description).mp4")
            //let videoSize = CGSize(width: moldura.larguraMoldura, height: moldura.alturaMoldura)
            imageToVideo.selectedPhotosArray = fotosBoomerangArray
            let sizeVideo = CGSize(width: moldura.larguraMoldura, height: moldura.alturaMoldura)
            imageToVideo.outputSize = sizeVideo
            imageToVideo.buildVideoFromImageArray(imageArrayToVideoURL: fileURL, completion: {
                (video) in
                DispatchQueue.main.async {
                    self.videoPlayer = video
                    self.irParaPreview()
                }                
            })

So what this is returning to me is a not playable video, if I try to play it I get just the iOS player with a bar cross over the play symbol and a wheel next to the time bar continuously spinning. I also need the data of the file to upload the video, that when I try to get it it is null.


Solution

  • So I was able to make a working solution combining some of the methods found in the links provided by Robin Stewart. It's worth pointing out that using them like they were didn't work for me, it was only when I made some alterations that it finally worked. Maybe this has something to do that most are in Swift 3 and I am using Swift 4.2.

    Here is my solution:

    func writeImagesAsMovie(_ allImages: [UIImage], videoPath: String, videoSize: CGSize, videoFPS: Int32, completion: @escaping (Bool) -> ()) -> Bool{
    
            guard let assetWriter = try? AVAssetWriter(outputURL: URL(string: videoPath)!, fileType: AVFileType.mp4) else {
                fatalError("AVVideoCodecType.h264 error")
            }
            let outputSettings = [AVVideoCodecKey : AVVideoCodecType.h264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width)), AVVideoHeightKey : NSNumber(value: Float(outputSize.height))] as [String : Any]
            guard assetWriter.canApply(outputSettings: outputSettings, forMediaType: AVMediaType.video) else {
                fatalError("Negative : Can't apply the Output settings...")
            }
            let writerInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: outputSettings)
    
    
            let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(value: Float(outputSize.width)), kCVPixelBufferHeightKey as String: NSNumber(value: Float(outputSize.height))]
            let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: writerInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
    
    
            if assetWriter.canAdd(writerInput) {
                assetWriter.add(writerInput)
            }
            // Start writing session
            if assetWriter.startWriting() {
                assetWriter.startSession(atSourceTime: CMTime.zero)
    
                // -- Create queue for <requestMediaDataWhenReadyOnQueue>
                assert(pixelBufferAdaptor.pixelBufferPool != nil)
                let mediaQueue = DispatchQueue(label: "mediaInputQueue", attributes: [])
    
                // -- Set video parameters
                let frameDuration = CMTimeMake(value: 1, timescale: videoFPS)
                var frameCount = 0
    
                // -- Add images to video
                let numImages = allImages.count
                writerInput.requestMediaDataWhenReady(on: mediaQueue, using: { () -> Void in
                    // Append unadded images to video but only while input ready
                    while (writerInput.isReadyForMoreMediaData && frameCount < numImages) {
                        let lastFrameTime = CMTimeMake(value: Int64(frameCount), timescale: videoFPS)
                        let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
    
                        if !self.appendPixelBufferForImageAtURL(allImages[frameCount], pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
                            print("Error converting images to video: AVAssetWriterInputPixelBufferAdapter failed to append pixel buffer")
                            return
                        }
    
                        frameCount += 1
                    }
    
                    // No more images to add? End video.
                    if (frameCount >= numImages) {
                        writerInput.markAsFinished()
                        assetWriter.finishWriting {
                            if (assetWriter.error != nil) {
                                print("Error converting images to video: \(assetWriter.error)")
                            } else {
                                print("Converted images to movie @ \(videoPath)")
                                completion(true)
                            }
                        }
                    }
                })
            }
    
            return true
        }
    
    
        func createAssetWriter(_ path: String, size: CGSize) -> AVAssetWriter? {
            // Convert <path> to NSURL object
            let pathURL = URL(fileURLWithPath: path)
    
            // Return new asset writer or nil
            do {
                // Create asset writer
                let newWriter = try AVAssetWriter(outputURL: pathURL, fileType: AVFileType.mp4)
    
                // Define settings for video input
                let videoSettings: [String : AnyObject] = [
                    AVVideoCodecKey  : AVVideoCodecType.h264 as AnyObject,
                    AVVideoWidthKey  : size.width as AnyObject,
                    AVVideoHeightKey : size.height as AnyObject,
                ]
    
                // Add video input to writer
                let assetWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
                newWriter.add(assetWriterVideoInput)
    
                // Return writer
                print("Created asset writer for \(size.width)x\(size.height) video")
                return newWriter
            } catch {
                print("Error creating asset writer: \(error)")
                return nil
            }
        }
    
    
        func appendPixelBufferForImageAtURL(_ image: UIImage, pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
            var appendSucceeded = false
    
            autoreleasepool {
                if  let pixelBufferPool = pixelBufferAdaptor.pixelBufferPool {
                    let pixelBufferPointer = UnsafeMutablePointer<CVPixelBuffer?>.allocate(capacity:1)
                    let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(
                        kCFAllocatorDefault,
                        pixelBufferPool,
                        pixelBufferPointer
                    )
    
                    if let pixelBuffer = pixelBufferPointer.pointee , status == 0 {
                        fillPixelBufferFromImage(image, pixelBuffer: pixelBuffer)
                        appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
                        pixelBufferPointer.deinitialize()
                    } else {
                        NSLog("Error: Failed to allocate pixel buffer from pool")
                    }
    
                    pixelBufferPointer.deallocate(capacity: 1)
                }
            }
    
            return appendSucceeded
        }
    
    
        func fillPixelBufferFromImage(_ image: UIImage, pixelBuffer: CVPixelBuffer) {
            CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
    
            let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer)
            let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
    
            // Create CGBitmapContext
            let context = CGContext(
                data: pixelData,
                width: Int(image.size.width),
                height: Int(image.size.height),
                bitsPerComponent: 8,
                bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer),
                space: rgbColorSpace,
                bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue
                )!
    
            // Draw image into context
            let drawCGRect = CGRect(x:0, y:0, width:image.size.width, height:image.size.height)
            var drawRect = NSCoder.string(for: drawCGRect);
            let ciImage = CIImage(image: image)
            let cgImage = convertCIImageToCGImage(inputImage: ciImage!)
            context.draw(cgImage!, in: CGRect(x: 0.0,y: 0.0,width: image.size.width,height: image.size.height))
    
            CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
        }
    
        func convertCIImageToCGImage(inputImage: CIImage) -> CGImage! {
            let context = CIContext(options: nil)
            if context != nil {
                return context.createCGImage(inputImage, from: inputImage.extent)
            }
            return nil
        }
    
    }