Adding a watermark to a video is extremely slow
I am rendering a watermark onto a video using AVComposition. The process takes around 15 seconds, which seems wrong for a 20-second video.
My export settings are:
let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = outputPath
exporter?.outputFileType = AVFileType.mp4
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = mainCompositionInst

DispatchQueue.main.async {
    exporter?.exportAsynchronously(completionHandler: {
        if exporter?.status == AVAssetExportSessionStatus.completed {
            completion(true, exporter)
        } else {
            completion(false, exporter)
        }
    })
}
This is how I add the watermark:
// Creating the image layer
let overlayLayer = CALayer()
let overlayImage: UIImage = image
overlayLayer.contents = overlayImage.cgImage
overlayLayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
overlayLayer.contentsGravity = kCAGravityResizeAspectFill
overlayLayer.masksToBounds = true

// Creating the parent and video layers
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
parentLayer.addSublayer(videoLayer)
parentLayer.addSublayer(overlayLayer)

// Adding those layers to the video composition
composition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
And this is how I finally transform the video:
let videoLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack!)
let videoAssetTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
var videoAssetOrientation = UIImageOrientation.up
var isVideoAssetPortrait = false
let videoTransform = videoAssetTrack.preferredTransform

if videoTransform.a == 0 && videoTransform.b == 1.0 && videoTransform.c == -1.0 && videoTransform.d == 0 {
    videoAssetOrientation = .right
    isVideoAssetPortrait = true
}
if videoTransform.a == 0 && videoTransform.b == -1.0 && videoTransform.c == 1.0 && videoTransform.d == 0 {
    videoAssetOrientation = .left
    isVideoAssetPortrait = true
}
if videoTransform.a == 1.0 && videoTransform.b == 0 && videoTransform.c == 0 && videoTransform.d == 1.0 {
    videoAssetOrientation = .up
}
if videoTransform.a == -1.0 && videoTransform.b == 0 && videoTransform.c == 0 && videoTransform.d == -1.0 {
    videoAssetOrientation = .down
}

videoLayerInstruction.setTransform(videoAssetTrack.preferredTransform, at: kCMTimeZero)

// Add instructions
mainInstruction.layerInstructions = [videoLayerInstruction]
let mainCompositionInst = AVMutableVideoComposition()

let naturalSize: CGSize
if isVideoAssetPortrait {
    naturalSize = CGSize(width: videoAssetTrack.naturalSize.height, height: videoAssetTrack.naturalSize.width)
} else {
    naturalSize = videoAssetTrack.naturalSize
}
So my question is: how can I improve the performance of merging the watermark into my video? Fifteen seconds is completely unacceptable for any kind of end user. On top of that, I also need to transfer this video over the internet afterwards, so the user would be staring at a loading screen for well over twenty seconds in total.
Per the Apple Documentation, try using the class AVAsynchronousCIImageFilteringRequest:
Overview
You use this class when creating a composition for Core Image filtering with the init(asset:applyingCIFiltersWithHandler:) method. In that method call, you provide a block to be called by AVFoundation as it processes each frame of video, and the block's sole parameter is an AVAsynchronousCIImageFilteringRequest object. Use that object both to get the video frame image to be filtered and to return a filtered image to AVFoundation for display or export. Listing 1 shows an example of applying a filter to an asset.
let filter = CIFilter(name: "CIGaussianBlur")!
let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
    // Clamp to avoid blurring transparent pixels at the image edges
    let source = request.sourceImage.clampedToExtent()
    filter.setValue(source, forKey: kCIInputImageKey)

    // Vary filter parameters based on video timing
    let seconds = CMTimeGetSeconds(request.compositionTime)
    filter.setValue(seconds * 10.0, forKey: kCIInputRadiusKey)

    // Crop the blurred output to the bounds of the original image
    let output = filter.outputImage!.cropped(to: request.sourceImage.extent)

    // Provide the filter output to the composition
    request.finish(with: output, context: nil)
})
There is also a tutorial in Objective-C that might be a good resource.
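The documentation listing above applies a blur, but the same handler can composite a watermark instead. Below is a minimal sketch, assuming the asset and the watermark image from your question, that uses the built-in CISourceOverCompositing filter to draw the watermark over each frame. The function name makeWatermarkComposition and the fixed bottom-left offset are illustrative, not part of your original code:

import AVFoundation
import CoreImage
import UIKit

// Sketch only: composite a watermark over every video frame with Core Image.
func makeWatermarkComposition(for asset: AVAsset, watermark image: UIImage) -> AVVideoComposition? {
    guard let cgImage = image.cgImage else { return nil }

    // Position the watermark; here it is simply offset from the bottom-left corner.
    let watermark = CIImage(cgImage: cgImage)
        .transformed(by: CGAffineTransform(translationX: 20, y: 20))
    let filter = CIFilter(name: "CISourceOverCompositing")!

    return AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
        // Draw the watermark (input image) over the current frame (background image)
        filter.setValue(watermark, forKey: kCIInputImageKey)
        filter.setValue(request.sourceImage, forKey: kCIInputBackgroundImageKey)

        // Crop to the frame bounds and hand the result back to AVFoundation
        let output = filter.outputImage!.cropped(to: request.sourceImage.extent)
        request.finish(with: output, context: nil)
    })
}

You would then assign the result to the exporter in place of mainCompositionInst, e.g. exporter?.videoComposition = makeWatermarkComposition(for: videoAsset, watermark: image). Note that this path replaces the AVVideoCompositionCoreAnimationTool approach entirely, so the CALayer setup from your question would no longer be needed.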