第二次导出AVAsset让视频空白
Exporting AVAsset second time makes video blank
我正在使用 AVMutableComposition()
添加曲目将多个视频文件拼接成一个,如下所示:
// Destination tracks for the stitched result (addMutableTrack can return nil, hence the Optionals).
let compositionVideoTrack = mainComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
let soundtrackTrack = mainComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
// Running insertion cursor: each clip is appended where the previous one ended.
private var insertTime = CMTime.zero
for videoAsset in arrayVideos {
// NOTE(review): try! and the unchecked [0] subscripts will crash if any asset lacks a
// video or audio track — consider guarding with tracks(withMediaType:).first and try/catch.
try! compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: .zero, duration: videoAsset.duration), of: videoAsset.tracks(withMediaType: .video)[0], at: insertTime)
try! soundtrackTrack?.insertTimeRange(CMTimeRangeMake(start: .zero, duration: videoAsset.duration), of: videoAsset.tracks(withMediaType: .audio)[0], at: insertTime)
// Advance the cursor by the duration of the clip just inserted.
insertTime = CMTimeAdd(insertTime, videoAsset.duration)
}
然后使用 AVAssetExportSession(asset: mainComposition, presetName: AVAssetExportPresetMediumQuality)
将它们导出到 .mov 文件中。
这会将拼接的视频保存到 url,我可以使用 AVAsset 访问它并显示给用户。之后,我尝试向视频添加图像叠加并再次导出。
在第二种方法中,我从 url AVAsset(url: fileUrl)
实例化了 AVAsset。并创建新的 AVMutableComposition()
。我将视频和音频轨道添加到资产的合成中:
// Add a video track to the new composition and copy the exported asset's
// entire first video track into it, starting at time zero.
compositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
// Fix: the subscript was empty ("[]"), which does not compile — index the first video track.
compositionTrack.insertTimeRange(timeRange, of: asset.tracks(withMediaType: .video)[0], at: .zero)
...
然后我使用图层和 AVVideoCompositionCoreAnimationTool()
向视频添加叠加层,如下所示:
// Host layer for the rendered video frames; AVFoundation draws each source frame into it.
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: videoSize)
// Watermark layer, added after videoLayer so it composites on top of the video.
let overlayLayer = CALayer()
overlayLayer.frame = CGRect(origin: .zero, size: videoSize)
overlayLayer.contents = watermark.cgImage
overlayLayer.contentsGravity = .resizeAspect
// Parent layer: sublayer order (video first, overlay second) defines the stacking.
let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: videoSize)
outputLayer.addSublayer(videoLayer)
outputLayer.addSublayer(overlayLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = videoSize
// 1/30 s per frame, i.e. a 30 fps output.
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
// Core Animation tool: video frames are post-processed into videoLayer, then outputLayer is rendered.
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: outputLayer)
// A single instruction must cover the full duration, otherwise uncovered ranges render black.
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(start: .zero, duration: asset.duration)
videoComposition.instructions = [instruction]
// NOTE(review): assetTrack is declared outside this snippet — presumably it is the SOURCE
// asset's track. When exporting an AVMutableComposition, the layer instruction must reference
// the COMPOSITION's video track; pointing it at the source asset's track is a common cause of
// the black-video symptom described here. Verify it references compositionTrack.
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: assetTrack)
layerInstruction.setTransform(assetTrack.preferredTransform, at: .zero)
instruction.layerInstructions = [layerInstruction]
然后我以与第一次导出相同的方式导出视频。
问题出现在我把这两种方法结合使用的时候。
如果我只使用第二种方法导出一些示例视频,则会添加视频叠加层,一切都符合预期。
如果我使用第一种方法拼接视频,则可以完美拼接视频。但是,当我结合这些方法时,生成的视频是黑色空白屏幕(音频和叠加层很好,生成的视频持续时间也适合)。
这个问题可能与 AVVideoCompositionCoreAnimationTool()
有关。
我能够通过使用另一种技术在第二个功能中向视频添加叠加来解决这个问题。我没有使用 AVVideoCompositionCoreAnimationTool()
来堆叠图层,而是使用了 CIFilter
,如下所示:
// Workaround: composite the watermark with Core Image instead of Core Animation.
// NOTE(review): the force-unwraps assume the filter name is valid and watermark has a CGImage.
let watermarkFilter = CIFilter(name: "CISourceOverCompositing")!
let watermarkImage = CIImage(image: watermark)!
// Per-frame callback: the current video frame is the background, the watermark the foreground.
let videoComposition = AVVideoComposition(asset: asset) { (filteringRequest) in
let source = filteringRequest.sourceImage
watermarkFilter.setValue(source, forKey: kCIInputBackgroundImageKey)
// Scale the watermark to fill the frame exactly (independent x/y scales may distort aspect).
let widthScale = source.extent.width/watermarkImage.extent.width
let heightScale = source.extent.height/watermarkImage.extent.height
watermarkFilter.setValue(watermarkImage.transformed(by: .init(scaleX: widthScale, y: heightScale)), forKey: kCIInputImageKey)
filteringRequest.finish(with: watermarkFilter.outputImage!, context: nil)
}
我正在使用 AVMutableComposition()
添加曲目将多个视频文件拼接成一个,如下所示:
// Destination tracks for the stitched result (addMutableTrack can return nil, hence the Optionals).
let compositionVideoTrack = mainComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
let soundtrackTrack = mainComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
// Running insertion cursor: each clip is appended where the previous one ended.
private var insertTime = CMTime.zero
for videoAsset in arrayVideos {
// NOTE(review): try! and the unchecked [0] subscripts will crash if any asset lacks a
// video or audio track — consider guarding with tracks(withMediaType:).first and try/catch.
try! compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: .zero, duration: videoAsset.duration), of: videoAsset.tracks(withMediaType: .video)[0], at: insertTime)
try! soundtrackTrack?.insertTimeRange(CMTimeRangeMake(start: .zero, duration: videoAsset.duration), of: videoAsset.tracks(withMediaType: .audio)[0], at: insertTime)
// Advance the cursor by the duration of the clip just inserted.
insertTime = CMTimeAdd(insertTime, videoAsset.duration)
}
然后使用 AVAssetExportSession(asset: mainComposition, presetName: AVAssetExportPresetMediumQuality)
将它们导出到 .mov 文件中。
这会将拼接的视频保存到 url,我可以使用 AVAsset 访问它并显示给用户。之后,我尝试向视频添加图像叠加并再次导出。
在第二种方法中,我从 url AVAsset(url: fileUrl)
实例化了 AVAsset。并创建新的 AVMutableComposition()
。我将视频和音频轨道添加到资产的合成中:
// Add a video track to the new composition and copy the exported asset's
// entire first video track into it, starting at time zero.
compositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
// Fix: the subscript was empty ("[]"), which does not compile — index the first video track.
compositionTrack.insertTimeRange(timeRange, of: asset.tracks(withMediaType: .video)[0], at: .zero)
...
然后我使用图层和 AVVideoCompositionCoreAnimationTool()
向视频添加叠加层,如下所示:
// Host layer for the rendered video frames; AVFoundation draws each source frame into it.
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: videoSize)
// Watermark layer, added after videoLayer so it composites on top of the video.
let overlayLayer = CALayer()
overlayLayer.frame = CGRect(origin: .zero, size: videoSize)
overlayLayer.contents = watermark.cgImage
overlayLayer.contentsGravity = .resizeAspect
// Parent layer: sublayer order (video first, overlay second) defines the stacking.
let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: videoSize)
outputLayer.addSublayer(videoLayer)
outputLayer.addSublayer(overlayLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = videoSize
// 1/30 s per frame, i.e. a 30 fps output.
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
// Core Animation tool: video frames are post-processed into videoLayer, then outputLayer is rendered.
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: outputLayer)
// A single instruction must cover the full duration, otherwise uncovered ranges render black.
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(start: .zero, duration: asset.duration)
videoComposition.instructions = [instruction]
// NOTE(review): assetTrack is declared outside this snippet — presumably it is the SOURCE
// asset's track. When exporting an AVMutableComposition, the layer instruction must reference
// the COMPOSITION's video track; pointing it at the source asset's track is a common cause of
// the black-video symptom described here. Verify it references compositionTrack.
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: assetTrack)
layerInstruction.setTransform(assetTrack.preferredTransform, at: .zero)
instruction.layerInstructions = [layerInstruction]
然后我以与第一次导出相同的方式导出视频。
问题出现在我把这两种方法结合使用的时候。 如果我只使用第二种方法导出一些示例视频,则会添加视频叠加层,一切都符合预期。 如果我使用第一种方法拼接视频,则可以完美拼接视频。但是,当我结合这些方法时,生成的视频是黑色空白屏幕(音频和叠加层很好,生成的视频时长也正确)。
这个问题可能与 AVVideoCompositionCoreAnimationTool()
有关。
我能够通过使用另一种技术在第二个功能中向视频添加叠加来解决这个问题。我没有使用 AVVideoCompositionCoreAnimationTool()
来堆叠图层,而是使用了 CIFilter
,如下所示:
// Workaround: composite the watermark with Core Image instead of Core Animation.
// NOTE(review): the force-unwraps assume the filter name is valid and watermark has a CGImage.
let watermarkFilter = CIFilter(name: "CISourceOverCompositing")!
let watermarkImage = CIImage(image: watermark)!
// Per-frame callback: the current video frame is the background, the watermark the foreground.
let videoComposition = AVVideoComposition(asset: asset) { (filteringRequest) in
let source = filteringRequest.sourceImage
watermarkFilter.setValue(source, forKey: kCIInputBackgroundImageKey)
// Scale the watermark to fill the frame exactly (independent x/y scales may distort aspect).
let widthScale = source.extent.width/watermarkImage.extent.width
let heightScale = source.extent.height/watermarkImage.extent.height
watermarkFilter.setValue(watermarkImage.transformed(by: .init(scaleX: widthScale, y: heightScale)), forKey: kCIInputImageKey)
filteringRequest.finish(with: watermarkFilter.outputImage!, context: nil)
}