How to merge a *Single Image* with a video

I am trying to combine a single video with a single image. This is not about combining many images into a single video.

I am using AVMutableComposition to merge the tracks. My app can combine videos and images (video-with-video merging already works fine!). I try to use AVAssetWriter to turn the single image into a video (I believe this is where my problem is, but I'm not 100% sure). I then save that video to the app's documents directory. From there, I access it inside my merge and combine the preexisting video with the image-turned-video.

The flow:

User selects an image ->

Image goes to an AVAssetWriter to be turned into a video ->

Merge the video I already had with the new image-video ->

Result: one video made from the selected image and the preexisting video.

The problem I'm having: my code leaves a blank space in the final video where the image's video should be. That is, the ImageConverter file I have does convert the image to a video, but I only see the LAST frame as the image, while every other frame is transparent, as if the picture isn't there. So if I convert the image into a 5-second video (at, say, 30 frames/sec), I see blank space for (30*5)-1 frames and then the picture finally appears on the last frame. I'm just looking for guidance on how to make a single image into a video, OR how to combine the video and the image without converting the image into a video. Thanks!
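(Editor's note: the "without converting the image" route, which the answers below also use, boils down to drawing the image as a CALayer on top of the video with AVVideoCompositionCoreAnimationTool. A minimal sketch, where overlayImage: UIImage and renderSize: CGSize are placeholder names, and a matching AVMutableVideoCompositionInstruction is still required:)

// Sketch only: overlay the image on the video via Core Animation layers,
// so no image-to-video conversion is needed.
let videoLayer = CALayer()
videoLayer.frame = CGRect(x: 0, y: 0, width: renderSize.width, height: renderSize.height)

let imageLayer = CALayer()
imageLayer.contents = overlayImage.CGImage
imageLayer.frame = videoLayer.frame

let parentLayer = CALayer()
parentLayer.frame = videoLayer.frame
parentLayer.addSublayer(videoLayer)
parentLayer.addSublayer(imageLayer)

let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = renderSize
videoComposition.frameDuration = CMTimeMake(1, 30)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, inLayer: parentLayer)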

Merging the files here:

func merge() {
    if let firstAsset = controller.firstAsset, secondAsset = self.asset {

        // 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
        let mixComposition = AVMutableComposition()

        let firstTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeVideo,
                                                                     preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
        do {
            try firstTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, CMTime(seconds: 8, preferredTimescale: 600)),
                                           ofTrack: firstAsset.tracksWithMediaType(AVMediaTypeVideo)[0] ,
                                           atTime: kCMTimeZero)
        } catch _ {
            print("Failed to load first track")
        }

        do {
            //HERE THE TIME IS 0.666667, BUT SHOULD BE 0
            print(CMTimeGetSeconds(secondAsset.duration), CMTimeGetSeconds(firstTrack.timeRange.duration))
            try firstTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, secondAsset.duration),
                                            ofTrack: secondAsset.tracksWithMediaType(AVMediaTypeVideo)[0],
                                            atTime: firstTrack.timeRange.duration)
        } catch _ {
            print("Failed to load second track")
        }
        do {
            try firstTrack.insertTimeRange(CMTimeRangeMake(CMTime(seconds: 8+CMTimeGetSeconds(secondAsset.duration), preferredTimescale: 600), firstAsset.duration),
                                           ofTrack: firstAsset.tracksWithMediaType(AVMediaTypeVideo)[0],
                                           atTime: firstTrack.timeRange.duration) // the track's duration already includes both earlier inserts
        } catch _ {
            print("failed")
        }

        // 3 - Audio track
        if let loadedAudioAsset = controller.audioAsset {
            let audioTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: 0)
            do {
                try audioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, firstAsset.duration),
                                               ofTrack: loadedAudioAsset.tracksWithMediaType(AVMediaTypeAudio)[0] ,
                                               atTime: kCMTimeZero)
            } catch _ {
                print("Failed to load Audio track")
            }
        }

        // 4 - Get path
        let documentDirectory = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)[0]
        let dateFormatter = NSDateFormatter()
        dateFormatter.dateStyle = .LongStyle
        dateFormatter.timeStyle = .ShortStyle
        let date = dateFormatter.stringFromDate(NSDate())
        let savePath = (documentDirectory as NSString).stringByAppendingPathComponent("mergeVideo.mov")
        let url = NSURL(fileURLWithPath: savePath)
        _ = try? NSFileManager().removeItemAtURL(url)

        // 5 - Create Exporter
        print("exporting")
        guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
        exporter.outputURL = url
        exporter.outputFileType = AVFileTypeQuickTimeMovie
        exporter.shouldOptimizeForNetworkUse = false
        exporter.videoComposition = mainComposition // mainComposition (an AVMutableVideoComposition) is presumably configured elsewhere in the project

        // 6 - Perform the Export
        controller.currentlyEditing = true
        exporter.exportAsynchronouslyWithCompletionHandler() {
            dispatch_async(dispatch_get_main_queue()) {
                print("done")
                self.controller.currentlyEditing = false
                self.controller.merged = true
                self.button.blurView.superview?.hidden = true
                self.controller.player.replaceCurrentItemWithPlayerItem(AVPlayerItem(URL: url))
                self.controller.firstAsset = AVAsset(URL: url)
            }
        }
    }
}
func exportDidFinish(session: AVAssetExportSession) {
    if session.status == AVAssetExportSessionStatus.Failed {
        print(session.error)
    }
    if session.status == AVAssetExportSessionStatus.Completed {
        print("succed")
    }
}

Converting the image here:

class MyConverter: NSObject {

    var image:UIImage!

    convenience init(image:UIImage) {
        self.init()
        self.image = image
    }

    var outputURL: NSURL {
        let documentDirectory = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)[0]
        let savePath = (documentDirectory as NSString).stringByAppendingPathComponent("mergeVideo-pic.mov")
        return getURL(savePath)
    }

    func getURL(path:String) -> NSURL {
        let movieDestinationUrl = NSURL(fileURLWithPath: path)
        _ = try? NSFileManager().removeItemAtURL(movieDestinationUrl)
        let url = NSURL(fileURLWithPath: path)
        return url
    }

    func build(completion:() -> Void) {
        guard let videoWriter = try? AVAssetWriter(URL: outputURL, fileType: AVFileTypeQuickTimeMovie) else {
            fatalError("AVAssetWriter error")
        }
        let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(float: Float(image.size.width)), AVVideoHeightKey : NSNumber(float: Float(image.size.height))]

        guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
            fatalError("Negative : Can't apply the Output settings...")
        }

        let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
        let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(unsignedInt: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(float: Float(image.size.width)), kCVPixelBufferHeightKey as String: NSNumber(float: Float(image.size.height))]
        let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)

        if videoWriter.canAddInput(videoWriterInput) {
            videoWriter.addInput(videoWriterInput)
        }

        if videoWriter.startWriting() {
            videoWriter.startSessionAtSourceTime(kCMTimeZero)
            assert(pixelBufferAdaptor.pixelBufferPool != nil)
        }

        let media_queue = dispatch_queue_create("mediaInputQueue", nil)

        videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
            var appendSucceeded = true
            //Time HERE IS ZERO, but in Merge file, it is 0.66667
            let presentationTime = CMTimeMake(0, 600)

            var pixelBuffer: CVPixelBuffer? = nil
            let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)

            if let pixelBuffer = pixelBuffer where status == 0 {
                let managedPixelBuffer = pixelBuffer
                CVPixelBufferLockBaseAddress(managedPixelBuffer, 0)

                let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
                let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
                let context = CGBitmapContextCreate(data, Int(self.image.size.width), Int(self.image.size.height), 8, CVPixelBufferGetBytesPerRow(managedPixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)

                CGContextClearRect(context, CGRectMake(0, 0, CGFloat(self.image.size.width), CGFloat(self.image.size.height)))

                CGContextDrawImage(context, CGRectMake(0, 0, self.image.size.width, self.image.size.height), self.image.CGImage)

                CVPixelBufferUnlockBaseAddress(managedPixelBuffer, 0)

                appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
            } else {
                print("Failed to allocate pixel buffer")
                appendSucceeded = false
            }
            if !appendSucceeded {
                print("append failed")
            }
            videoWriterInput.markAsFinished()
            videoWriter.finishWritingWithCompletionHandler { () -> Void in
                print("FINISHED!!!!!")
                completion()
            }
        })
    }
}

Note: I found that if I do print(presentationTime) inside the ImageConverter, it prints 0, but when I then print the duration inside the merge, I get 0.666667.
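A quick way to see that mismatch is to inspect the written file directly before merging. A minimal diagnostic sketch, assuming converter is the MyConverter instance from above:

// Diagnostic sketch: load the file MyConverter wrote and print its duration
// and the time range of its (only) video track before merging.
let imageAsset = AVAsset(URL: converter.outputURL)
print("written asset duration:", CMTimeGetSeconds(imageAsset.duration))
if let track = imageAsset.tracksWithMediaType(AVMediaTypeVideo).first {
    CMTimeRangeShow(track.timeRange) // prints {start, duration} to the console
}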

Note: There is no answer yet, but I will keep putting a bounty on this question until I find the answer or someone helps me! Thanks!

Right, so I actually dealt with this a while back. The problem is indeed in how you are creating the video from the image. What you need to do is add the pixel buffer at time zero and then add it AGAIN at the end; otherwise you end up with an empty video until the very last frame, exactly like you are experiencing.

The code below is my best attempt at updating your code. At the end, I'll post my Objective-C solution in case it helps anyone else.

func build(completion:() -> Void) {
    guard let videoWriter = try? AVAssetWriter(URL: outputURL, fileType: AVFileTypeQuickTimeMovie) else {
        fatalError("AVAssetWriter error")
    }

    // This might not be a problem for you but width HAS to be divisible by 16 or the movie will come out distorted... don't ask me why. So this is a safeguard
    let pixelsToRemove = image.size.width % 16
    let pixelsToAdd = 16 - pixelsToRemove
    let size = CGSizeMake(image.size.width + pixelsToAdd, image.size.height)

    let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(float: Float(size.width)), AVVideoHeightKey : NSNumber(float: Float(size.height))]

    guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
        fatalError("Negative : Can't apply the Output settings...")
    }

    let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
    let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(unsignedInt: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(float: Float(size.width)), kCVPixelBufferHeightKey as String: NSNumber(float: Float(size.height))]
    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)

    if videoWriter.canAddInput(videoWriterInput) {
        videoWriter.addInput(videoWriterInput)
    }

    if videoWriter.startWriting() {
        videoWriter.startSessionAtSourceTime(kCMTimeZero)
        assert(pixelBufferAdaptor.pixelBufferPool != nil)
    }

    // For simplicity, I'm going to remove the media queue you created and instead explicitly wait until I can append since i am only writing one pixel buffer at two different times

    var pixelBufferCreated = true
    var pixelBuffer: CVPixelBuffer? = nil
    let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)

    if let pixelBuffer = pixelBuffer where status == 0 {
        let managedPixelBuffer = pixelBuffer
        CVPixelBufferLockBaseAddress(managedPixelBuffer, 0)

        let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGBitmapContextCreate(data, Int(size.width), Int(size.height), 8, CVPixelBufferGetBytesPerRow(managedPixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)

        CGContextClearRect(context, CGRectMake(0, 0, CGFloat(size.width), CGFloat(size.height)))

        CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), self.image.CGImage)

        CVPixelBufferUnlockBaseAddress(managedPixelBuffer, 0)
    } else {
        print("Failed to allocate pixel buffer")
        pixelBufferCreated = false
    }

    if (pixelBufferCreated) {
        // Here is where the magic happens, we have our pixelBuffer it's time to start writing

        // FIRST - add the frame at time zero
        var appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer!, withPresentationTime: kCMTimeZero) // pixelBuffer is non-nil here, guarded by pixelBufferCreated
        if (!appendSucceeded) {
            // something went wrong, up to you to handle. Should probably return so the rest of the code is not executed though
        }
        // SECOND - wait until the writer is ready for more data with an empty while
        while !videoWriterInput.readyForMoreMediaData {}

        // THIRD - make a CMTime with the desired length of your picture-video. I am going to arbitrarily make it 5 seconds here
        let frameTime: CMTime = CMTimeMake(5, 1) // 5 seconds

        // FOURTH - add the same exact pixel buffer to the end of the video you are creating
        appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer!, withPresentationTime: frameTime)
        if (!appendSucceeded) {
            // something went wrong, up to you to handle. Should probably return so the rest of the code is not executed though
        }

        // markAsFinished takes no closure; end the session at frameTime before finishing
        videoWriterInput.markAsFinished()
        videoWriter.endSessionAtSourceTime(frameTime)
        videoWriter.finishWritingWithCompletionHandler { () -> Void in
            if videoWriter.status != .Completed {
                // Error writing the video... handle appropriately 
            } else {
                print("FINISHED!!!!!")
                completion()
            }
        }
    }
}
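A hedged usage sketch for the rewritten build; selectedImage and the self.asset / merge() plumbing are assumptions based on the question's code:

// Hypothetical call site: convert the picked image, then merge once the
// picture-video actually exists on disk.
let converter = MyConverter(image: selectedImage)
converter.build {
    self.asset = AVAsset(URL: converter.outputURL) // becomes `secondAsset` in merge()
    self.merge()
}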

How I did it in Obj-C

Note: I had to make a few edits to make this self-contained, so this method returns a string holding the path the video will be written to. It is returned before the video writing finishes, so it may be possible to access the file before it's ready if you're not careful.

-(NSString *)makeMovieFromImageData:(NSData *)imageData {
    NSError *error;
    UIImage *image = [UIImage imageWithData:imageData];


    // width has to be divisible by 16 or the movie comes out distorted... don't ask me why
    double pixelsToRemove = fmod(image.size.width, 16);

    double pixelsToAdd = 16 - pixelsToRemove; 

    CGSize size = CGSizeMake(image.size.width+pixelsToAdd, image.size.height);

    BOOL hasFoundValidPath = NO;
    NSURL *tempFileURL;
    NSString *outputFile;
    NSString *tempPath;

    while (!hasFoundValidPath) {

        NSString *guid = [[NSUUID new] UUIDString];
        outputFile = [NSString stringWithFormat:@"picture_%@.mp4", guid];

        NSString *outputDirectory = NSTemporaryDirectory();

        tempPath = [outputDirectory stringByAppendingPathComponent:outputFile];

        // Will fail if destination already has a file
        if ([[NSFileManager defaultManager] fileExistsAtPath:tempPath]) {
            continue;
        } else {
            hasFoundValidPath = YES;
        }
        tempFileURL = [NSURL fileURLWithPath:tempPath];
    }


    // Start writing
    AVAssetWriter *videoWriter = [[AVAssetWriter alloc] initWithURL:tempFileURL
                                                           fileType:AVFileTypeQuickTimeMovie
                                                              error:&error];

    if (error) {
       // handle error
    }

    NSDictionary *videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                                   AVVideoCodecH264, AVVideoCodecKey,
                                   [NSNumber numberWithInt:size.width], AVVideoWidthKey,
                                   [NSNumber numberWithInt:size.height], AVVideoHeightKey,
                                   nil];

    AVAssetWriterInput* writerInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo
                                                                         outputSettings:videoSettings];

    NSDictionary *bufferAttributes = [NSDictionary dictionaryWithObjectsAndKeys:
                                      [NSNumber numberWithInt:kCVPixelFormatType_32ARGB], (__bridge NSString *)kCVPixelBufferPixelFormatTypeKey, nil];

    AVAssetWriterInputPixelBufferAdaptor *adaptor = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:writerInput
                                                                                                                     sourcePixelBufferAttributes:bufferAttributes];
    if ([videoWriter canAddInput:writerInput]) {
        [videoWriter addInput:writerInput];
    } else {
        // handle error
    }

    [videoWriter startWriting];

    [videoWriter startSessionAtSourceTime:kCMTimeZero];

    CGImageRef img = [image CGImage];

    // Now I am going to create the pixelBuffer
    NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
                            [NSNumber numberWithBool:YES], (__bridge NSString *)kCVPixelBufferCGImageCompatibilityKey,
                            [NSNumber numberWithBool:YES], (__bridge NSString *)kCVPixelBufferCGBitmapContextCompatibilityKey,
                            nil];
    CVPixelBufferRef buffer = NULL;

    CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, size.width,
                                          size.height, kCVPixelFormatType_32ARGB, (__bridge CFDictionaryRef) options,
                                          &buffer);

    if ( !(status == kCVReturnSuccess && buffer != NULL) ) {
        NSLog(@"There be some issue. We didn't get a buffer from the image");
    }


    CVPixelBufferLockBaseAddress(buffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(buffer);

    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();

    CGContextRef context = CGBitmapContextCreate(pxdata, size.width,
                                                 size.height, 8, 4*size.width, rgbColorSpace,
                                                 (CGBitmapInfo)kCGImageAlphaPremultipliedFirst);
    CGContextSetRGBFillColor(context, 0, 0, 0, 0);

    CGContextConcatCTM(context, CGAffineTransformIdentity);

    CGContextDrawImage(context, CGRectMake(0, 0, size.width,
                                           size.height), img);
    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);

    CVPixelBufferUnlockBaseAddress(buffer, 0);

    // At this point we have our buffer so we are going to start by adding to time zero

    [adaptor appendPixelBuffer:buffer withPresentationTime:kCMTimeZero];

    while (!writerInput.readyForMoreMediaData) {} // wait until ready

    CMTime frameTime = CMTimeMake(5, 1); // 5 second frame

    [adaptor appendPixelBuffer:buffer withPresentationTime:frameTime];
    CFRelease(buffer);

    [writerInput markAsFinished];

    [videoWriter endSessionAtSourceTime:frameTime];

    [videoWriter finishWritingWithCompletionHandler:^{
        if (videoWriter.status != AVAssetWriterStatusCompleted) {
            // Error
        }
    }]; // end videoWriter finishWriting Block

    // NOTE: the path is actually being returned before the videoWriter finishes writing so be careful to not access the file until it's ready
    return tempPath;
}
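If the early return worries you, one alternative (a sketch in Swift, not part of the answer above) is to hand the URL out only from the writer's completion handler:

// Hypothetical wrapper: expose the output URL only after AVAssetWriter
// reports success, so callers can never read a half-written file.
func finishWriting(videoWriter: AVAssetWriter, url: NSURL, completion: (NSURL?) -> Void) {
    videoWriter.finishWritingWithCompletionHandler {
        if videoWriter.status == .Completed {
            completion(url) // the file is fully written at this point
        } else {
            completion(nil) // writing failed or was cancelled
        }
    }
}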

This is what worked for me. I hope it works for you:

-(void)MixVideo:(NSString *)vidioUrlString withImage:(UIImage *)img
{
    NSURL *videoUrl1 = [[NSURL alloc] initFileURLWithPath:vidioUrlString];
    AVURLAsset* videoAsset = [[AVURLAsset alloc]initWithURL:videoUrl1 options:nil];

    AVMutableComposition* mixComposition = [AVMutableComposition composition];

    AVMutableCompositionTrack *compositionVideoTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeVideo preferredTrackID:kCMPersistentTrackID_Invalid];

    AVAssetTrack *clipVideoTrack = [[videoAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];

    AVMutableCompositionTrack *compositionAudioTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeAudio preferredTrackID:kCMPersistentTrackID_Invalid];

    AVAssetTrack *clipAudioTrack = [[videoAsset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0];


    [compositionVideoTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, videoAsset.duration) ofTrack:clipVideoTrack atTime:kCMTimeZero error:nil];

    [compositionAudioTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, videoAsset.duration) ofTrack:clipAudioTrack atTime:kCMTimeZero error:nil];

    [compositionVideoTrack setPreferredTransform:[[[videoAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0] preferredTransform]];

    CGSize sizeOfVideo = CGSizeMake(320, 568);

    //Image of watermark
    UIImage *myImage=img;

    CALayer *layerCa = [CALayer layer];

    layerCa.contents = (id)myImage.CGImage;
    layerCa.frame = CGRectMake(0, 0, sizeOfVideo.width, sizeOfVideo.height);

    layerCa.opacity = 1.0;

    CALayer *parentLayer=[CALayer layer];

    CALayer *videoLayer=[CALayer layer];

    parentLayer.frame=CGRectMake(0, 0, sizeOfVideo.width, sizeOfVideo.height);

    videoLayer.frame=CGRectMake(0, 0, sizeOfVideo.width, sizeOfVideo.height);
    [parentLayer addSublayer:videoLayer];

    [parentLayer addSublayer:layerCa];

    AVMutableVideoComposition *videoComposition=[AVMutableVideoComposition videoComposition] ;

    videoComposition.frameDuration=CMTimeMake(1, 30);

    videoComposition.renderSize=sizeOfVideo;

    videoComposition.animationTool=[AVVideoCompositionCoreAnimationTool videoCompositionCoreAnimationToolWithPostProcessingAsVideoLayer:videoLayer inLayer:parentLayer];

    AVMutableVideoCompositionInstruction *instruction = [AVMutableVideoCompositionInstruction videoCompositionInstruction];

    instruction.timeRange = CMTimeRangeMake(kCMTimeZero, [mixComposition duration]);

    AVAssetTrack *videoTrack = [[mixComposition tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];

    AVMutableVideoCompositionLayerInstruction* layerInstruction = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:videoTrack];

    instruction.layerInstructions = [NSArray arrayWithObject:layerInstruction];

    videoComposition.instructions = [NSArray arrayWithObject: instruction];

    NSString *documentsDirectory = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES)objectAtIndex:0];

    NSString *finalPath = [documentsDirectory stringByAppendingPathComponent:@"myVideo.mp4"];

    if ([[NSFileManager defaultManager] fileExistsAtPath:finalPath])
    {
        [[NSFileManager defaultManager] removeItemAtPath:finalPath error:nil];
    }

    SDAVAssetExportSession *encoder = [[SDAVAssetExportSession alloc] initWithAsset:mixComposition];
    encoder.outputFileType = AVFileTypeMPEG4;
    encoder.outputURL = [NSURL fileURLWithPath:finalPath];
    encoder.videoComposition=videoComposition;
    encoder.videoSettings = @
    {
    AVVideoCodecKey: AVVideoCodecH264,
    AVVideoWidthKey: @320,
    AVVideoHeightKey: @568,
    AVVideoCompressionPropertiesKey: @
        {
        AVVideoAverageBitRateKey: @900000,
        AVVideoProfileLevelKey: AVVideoProfileLevelH264MainAutoLevel,
        },
    };
    encoder.audioSettings = @
    {
    AVFormatIDKey: @(kAudioFormatMPEG4AAC),
    AVNumberOfChannelsKey: @2,
    AVSampleRateKey: @44100,
    AVEncoderBitRateKey: @128000,
    };

    [encoder exportAsynchronouslyWithCompletionHandler:^
     {

         if (encoder.status == AVAssetExportSessionStatusCompleted)
         {

             NSLog(@"Video export succeeded");
             if (UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(finalPath))
             {

                 NSLog(@"Video exported successfully path = %@ ",finalPath);
             }

         }
         else if (encoder.status == AVAssetExportSessionStatusCancelled)
         {
             NSLog(@"Video export cancelled");
         }
         else
         {
             NSLog(@"Video export failed with error: %@ (%ld)", encoder.error.localizedDescription, (long)encoder.error.code);
         }
     }];

}

This worked for me for exporting a single image to a video (the video pans over the image rather than being a static frame). Swift 3.

//
//  CXEImageToAssetURL.swift
//  CXEngine
//
//  Created by wulei on 16/12/14.
//  Copyright © 2016 wulei. All rights reserved.
//

import Foundation
import AVFoundation
import UIKit
import Photos

fileprivate extension UIImage{
    func normalizedImage() -> UIImage?{
//        if self.imageOrientation == .up{
//            return self
//        }
        let factor = CGFloat(0.8)
        UIGraphicsBeginImageContextWithOptions(CGSize(width:self.size.width * factor, height: self.size.height * factor), false, self.scale)
        self.draw(in: CGRect(x: 0, y: 0, width: self.size.width * factor, height: self.size.height * factor))
        let normalImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return normalImage
    }

//    func clipImage() -> UIImage {

//        var x = CGFloat(0)
//        var y = CGFloat(0)
//        let imageHeight = (self.size.width * 9) / 16
//        y = (self.size.height - imageHeight)/2
//        var rcTmp = CGRect(origin: CGPoint(x: x, y: y), size: self.size)
//        if self.scale > 1.0 {
//            rcTmp = CGRect(x: rcTmp.origin.x * self.scale, y: rcTmp.origin.y * self.scale, width: rcTmp.size.width * self.scale, height: rcTmp.size.height * self.scale)
//        }
//        rcTmp.size.height = imageHeight
//        let imageRef = self.cgImage!.cropping(to: rcTmp)
//        let result = UIImage(cgImage: imageRef!, scale: self.scale, orientation: self.imageOrientation)
//        return result
//        return self
//    }
}

public typealias CXEImageToVideoProgress = (Float) -> Void
typealias CXEMovieMakerUIImageExtractor = (AnyObject) -> UIImage?


public class CXEImageToVideo: NSObject{

    //MARK: Private Properties

    private var assetWriter:AVAssetWriter!
    private var writeInput:AVAssetWriterInput!
    private var bufferAdapter:AVAssetWriterInputPixelBufferAdaptor!
    private var videoSettings:[String : Any]!
    private var frameTime:CMTime!
    private var fileURL:URL!
    private var duration:Int = 0

    //MARK: Class Method

     private func videoSettingsFunc(width:Int, height:Int) -> [String: Any]{
        if(Int(width) % 16 != 0){
            print("warning: video settings width must be divisible by 16")
        }

        let videoSettings:[String: Any] = [AVVideoCodecKey: AVVideoCodecH264,
                                           AVVideoWidthKey: width,
                                           AVVideoHeightKey: height]

        return videoSettings
    }

    //MARK: Public methods

    public init(fileURL: URL, videoWidth:Int, videoHeight:Int) {
        super.init()

        self.videoSettings = videoSettingsFunc(width: videoWidth, height: videoHeight)

        self.fileURL = fileURL
        self.assetWriter = try! AVAssetWriter(url: self.fileURL, fileType: AVFileTypeQuickTimeMovie)

        self.writeInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
        assert(self.assetWriter.canAdd(self.writeInput), "add failed")

        self.assetWriter.add(self.writeInput)
        let bufferAttributes:[String: Any] = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32ARGB)]
        self.bufferAdapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: self.writeInput, sourcePixelBufferAttributes: bufferAttributes)
        self.frameTime = CMTimeMake(1, 25)
    }

//    public func createMovieFrom(url: URL, duration:Int, progressExtractor: CXEImageToVideoProgress){
//        self.duration = duration
//        self.createMovieFromSource(image: url as AnyObject, extractor:{(inputObject:AnyObject) ->UIImage? in
//            return UIImage(data: try! Data(contentsOf: inputObject as! URL))}, progressExtractor: progressExtractor)
//    }

    public func createMovieFrom(imageData: Data, duration:Int, progressExtractor: CXEImageToVideoProgress){
        var image = UIImage(data: imageData)
        image = image?.normalizedImage()
        assert(image != nil)
        self.duration = duration

        self.createMovieFromSource(image: image!, extractor: {(inputObject:AnyObject) -> UIImage? in
            return inputObject as? UIImage}, progressExtractor: progressExtractor)
    }

    //MARK: Private methods

    private func createMovieFromSource(image: AnyObject, extractor: @escaping CXEMovieMakerUIImageExtractor, progressExtractor: CXEImageToVideoProgress){

        self.assetWriter.startWriting()
        let zeroTime = CMTimeMake(Int64(0),self.frameTime.timescale)
        self.assetWriter.startSession(atSourceTime: zeroTime)

        while !self.writeInput.isReadyForMoreMediaData {
            usleep(100)
        }

        var sampleBuffer:CVPixelBuffer?
        var pxDataBuffer:CVPixelBuffer?
        let img = extractor(image)
        assert(img != nil)

        let options:[String: Any] = [kCVPixelBufferCGImageCompatibilityKey as String: true, kCVPixelBufferCGBitmapContextCompatibilityKey as String: true]
        let frameHeight = self.videoSettings[AVVideoHeightKey] as! Int
        let frameWidth = self.videoSettings[AVVideoWidthKey] as! Int
        let originHeight = frameWidth * img!.cgImage!.height / img!.cgImage!.width
        let heightDifference = originHeight - frameHeight

        let frameCounts = self.duration * Int(self.frameTime.timescale)
        let spacingOfHeight = heightDifference / frameCounts

        sampleBuffer = self.newPixelBufferFrom(cgImage: img!.cgImage!)
        assert(sampleBuffer != nil)

        var presentTime = CMTimeMake(1, self.frameTime.timescale)
        var stepRows = 0

        for i in 0..<frameCounts {
            progressExtractor(Float(i) / Float(frameCounts))

            CVPixelBufferLockBaseAddress(sampleBuffer!, CVPixelBufferLockFlags(rawValue: 0))
            let pointer = CVPixelBufferGetBaseAddress(sampleBuffer!)
            var pxData = pointer?.assumingMemoryBound(to: UInt8.self)
            let bytes = CVPixelBufferGetBytesPerRow(sampleBuffer!) * stepRows
            pxData = pxData?.advanced(by: bytes)

            let status = CVPixelBufferCreateWithBytes(kCFAllocatorDefault, frameWidth, frameHeight, kCVPixelFormatType_32ARGB, pxData!, CVPixelBufferGetBytesPerRow(sampleBuffer!), nil, nil, options as CFDictionary?, &pxDataBuffer)
            assert(status == kCVReturnSuccess && pxDataBuffer != nil, "newPixelBuffer failed")
            CVPixelBufferUnlockBaseAddress(sampleBuffer!, CVPixelBufferLockFlags(rawValue: 0))

            while !self.writeInput.isReadyForMoreMediaData {
                usleep(100)
            }
            if (self.writeInput.isReadyForMoreMediaData){
                if i == 0{
                    self.bufferAdapter.append(pxDataBuffer!, withPresentationTime: zeroTime)
                }else{
                    self.bufferAdapter.append(pxDataBuffer!, withPresentationTime: presentTime)
                }
                presentTime = CMTimeAdd(presentTime, self.frameTime)
            }

            stepRows += spacingOfHeight
        }


        self.writeInput.markAsFinished()
        self.assetWriter.finishWriting {}

        var isSuccess:Bool = false
        while(!isSuccess){
            switch self.assetWriter.status {
            case .completed:
                isSuccess = true
                print("completed")
            case .writing:
                usleep(100)
                print("writing")
            case .failed:
                isSuccess = true
                print("failed")
            case .cancelled:
                isSuccess = true
                print("cancelled")
            default:
                isSuccess = true
                print("unknown")
            }
        }
    }

    private func newPixelBufferFrom(cgImage:CGImage) -> CVPixelBuffer?{
        let options:[String: Any] = [kCVPixelBufferCGImageCompatibilityKey as String: true, kCVPixelBufferCGBitmapContextCompatibilityKey as String: true]
        var pxbuffer:CVPixelBuffer?
        let frameWidth = self.videoSettings[AVVideoWidthKey] as! Int
        let frameHeight = self.videoSettings[AVVideoHeightKey] as! Int

        let originHeight = frameWidth * cgImage.height / cgImage.width

        let status = CVPixelBufferCreate(kCFAllocatorDefault, frameWidth, originHeight, kCVPixelFormatType_32ARGB, options as CFDictionary?, &pxbuffer)
        assert(status == kCVReturnSuccess && pxbuffer != nil, "newPixelBuffer failed")

        CVPixelBufferLockBaseAddress(pxbuffer!, CVPixelBufferLockFlags(rawValue: 0))
        let pxdata = CVPixelBufferGetBaseAddress(pxbuffer!)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGContext(data: pxdata, width: frameWidth, height: originHeight, bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pxbuffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)
        assert(context != nil, "context is nil")

        context!.concatenate(CGAffineTransform.identity)
        context!.draw(cgImage, in: CGRect(x: 0, y: 0, width: frameWidth, height: originHeight))
        CVPixelBufferUnlockBaseAddress(pxbuffer!, CVPixelBufferLockFlags(rawValue: 0))
        return pxbuffer
    }
}
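A hedged usage sketch; the output path, dimensions, someImage, and the 5-second duration are all placeholders:

// Hypothetical call site: write a 5-second movie for a single image and log progress.
let outputURL = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent("image.mov")
let maker = CXEImageToVideo(fileURL: outputURL, videoWidth: 640, videoHeight: 480) // width divisible by 16
let imageData = UIImageJPEGRepresentation(someImage, 0.9)!
maker.createMovieFrom(imageData: imageData, duration: 5) { progress in
    print("progress: \(progress)")
}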

Swift 3 code that should be a good starting point. It still needs error handling, and handling of video size/orientation for certain videos, before production use.

@discardableResult func merge(
    video videoPath: String,
    withForegroundImage foregroundImage: UIImage,
    completion: @escaping (AVAssetExportSession) -> Void) -> AVAssetExportSession {

    let videoUrl = URL(fileURLWithPath: videoPath)
    let videoUrlAsset = AVURLAsset(url: videoUrl, options: nil)

    // Setup `mutableComposition` from the existing video
    let mutableComposition = AVMutableComposition()
    let videoAssetTrack = videoUrlAsset.tracks(withMediaType: AVMediaTypeVideo).first!
    let videoCompositionTrack = mutableComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)
    videoCompositionTrack.preferredTransform = videoAssetTrack.preferredTransform
    try! videoCompositionTrack.insertTimeRange(CMTimeRange(start:kCMTimeZero, duration:videoAssetTrack.timeRange.duration), of: videoAssetTrack, at: kCMTimeZero)
    let audioAssetTrack = videoUrlAsset.tracks(withMediaType: AVMediaTypeAudio).first!
    let audioCompositionTrack = mutableComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
    try! audioCompositionTrack.insertTimeRange(CMTimeRange(start: kCMTimeZero, duration:audioAssetTrack.timeRange.duration), of: audioAssetTrack, at: kCMTimeZero)

    // Create a `videoComposition` to represent the `foregroundImage`
    let videoSize: CGSize = videoCompositionTrack.naturalSize
    let frame = CGRect(x: 0.0, y: 0.0, width: videoSize.width, height: videoSize.height)
    let imageLayer = CALayer()
    imageLayer.contents = foregroundImage.cgImage
    imageLayer.frame = frame
    let videoLayer = CALayer()
    videoLayer.frame = frame
    let animationLayer = CALayer()
    animationLayer.frame = frame
    animationLayer.addSublayer(videoLayer)
    animationLayer.addSublayer(imageLayer)
    let videoComposition = AVMutableVideoComposition(propertiesOf: videoCompositionTrack.asset!)
    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: animationLayer)

    // Export the video
    let documentDirectory = NSSearchPathForDirectoriesInDomains(FileManager.SearchPathDirectory.cachesDirectory, FileManager.SearchPathDomainMask.userDomainMask, true).first!
    let documentDirectoryUrl = URL(fileURLWithPath: documentDirectory)
    let destinationFilePath = documentDirectoryUrl.appendingPathComponent("video_\(NSUUID().uuidString).mov")
    let exportSession = AVAssetExportSession( asset: mutableComposition, presetName: AVAssetExportPresetHighestQuality)!
    exportSession.videoComposition = videoComposition
    exportSession.outputURL = destinationFilePath
    exportSession.outputFileType = AVFileTypeQuickTimeMovie
    exportSession.exportAsynchronously { [weak exportSession] in
        if let strongExportSession = exportSession {
            completion(strongExportSession)
        }
    }

    return exportSession
}
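A hedged usage sketch; videoPath and watermark are placeholders:

// Hypothetical call site: overlay `watermark` on the video at `videoPath`
// and check how the export ended.
merge(video: videoPath, withForegroundImage: watermark) { session in
    switch session.status {
    case .completed:
        print("exported to \(session.outputURL!)")
    case .failed, .cancelled:
        print("export failed: \(String(describing: session.error))")
    default:
        break
    }
}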