Overlaying image on video reduces video resolution
When I overlay an image on a video, the video quality drops dramatically. If I don't set a videoComposition on the export session, or if I export with the passthrough preset, the quality is fine (but then, of course, there is no overlay).
I'm passing in a local .mov video URL to add the overlay to.
I'm using PHPhotoLibrary to save the video to the camera roll.
A few other helper functions transform the video and set up its layer instructions.
It all seems straightforward, but something is degrading the video quality.
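For reference, the quality-preserving baseline mentioned above looks roughly like this (a minimal sketch; the function and parameter names are illustrative, not from my project):

import AVFoundation

// Passthrough export: no videoComposition is set, so the source samples are
// copied straight through without re-rendering, and quality is preserved.
func exportPassthrough(from sourceURL: URL, to outputURL: URL) {
  let asset = AVAsset(url: sourceURL)
  guard let exporter = AVAssetExportSession(asset: asset,
                                            presetName: AVAssetExportPresetPassthrough) else { return }
  exporter.outputURL = outputURL
  exporter.outputFileType = .mov
  exporter.exportAsynchronously {
    print("Passthrough export finished with status \(exporter.status.rawValue)")
  }
}

The full merge code, which does degrade quality, follows: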
func merge3(url: URL) {
  let firstAsset = AVAsset(url: url)
  // 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
  let mixComposition = AVMutableComposition()
  // 2 - Create the video track
  guard
    let firstTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
                                                    preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
  else {
    return
  }
  do {
    try firstTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: firstAsset.duration),
                                   of: firstAsset.tracks(withMediaType: AVMediaType.video)[0],
                                   at: CMTime.zero)
  } catch {
    print("Failed to load first track")
    return
  }
  // 3 - Build the overlay (watermark) layer tree, sized to the screen bounds
  let s = UIScreen.main.bounds
  let imglogo = UIImage(named: "django")?.scaleImageToSize(newSize: CGSize(width: 250, height: 125)) // custom UIImage helper
  let imglayer = CALayer()
  imglayer.contents = imglogo?.cgImage
  imglayer.frame = CGRect(x: s.width / 2 - 125, y: s.height / 2 - 67.5, width: 250, height: 125)
  imglayer.opacity = 1.0
  let videolayer = CALayer()
  videolayer.frame = CGRect(x: 0, y: 0, width: s.width, height: s.height)
  let parentlayer = CALayer()
  parentlayer.frame = CGRect(x: 0, y: 0, width: s.width, height: s.height)
  parentlayer.addSublayer(videolayer)
  parentlayer.addSublayer(imglayer)
  // 3.1 - Main instruction (note: created here but never used below; `instruction` is what gets attached)
  let mainInstruction = AVMutableVideoCompositionInstruction()
  mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero,
                                              duration: firstAsset.duration)
  let layercomposition = AVMutableVideoComposition()
  layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
  layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
  layercomposition.renderSize = CGSize(width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)
  // instruction for watermark
  let instruction = AVMutableVideoCompositionInstruction()
  instruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: firstAsset.duration)
  _ = mixComposition.tracks(withMediaType: AVMediaType.video)[0] as AVAssetTrack
  let layerinstruction = VideoHelper.videoCompositionInstruction1(firstTrack, asset: firstAsset)
  instruction.layerInstructions = [layerinstruction]
  layercomposition.instructions = [instruction]
  // 4 - Get path
  guard let documentDirectory = FileManager.default.urls(for: .documentDirectory,
                                                         in: .userDomainMask).first else {
    return
  }
  let dateFormatter = DateFormatter()
  dateFormatter.dateStyle = .long
  dateFormatter.timeStyle = .short
  let date = dateFormatter.string(from: Date())
  let url = documentDirectory.appendingPathComponent("mergeVideo-\(date).mov")
  // 5 - Create Exporter
  guard let exporter = AVAssetExportSession(asset: mixComposition,
                                            presetName: AVAssetExportPresetHighestQuality) else {
    return
  }
  exporter.outputURL = url
  exporter.outputFileType = AVFileType.mov
  exporter.shouldOptimizeForNetworkUse = true
  exporter.videoComposition = layercomposition
  // 6 - Perform the Export
  exporter.exportAsynchronously {
    DispatchQueue.main.async {
      self.exportDidFinish(exporter)
    }
  }
}
func exportDidFinish(_ session: AVAssetExportSession) {
  guard
    session.status == AVAssetExportSession.Status.completed,
    let outputURL = session.outputURL
  else {
    return
  }
  let saveVideoToPhotos = {
    PHPhotoLibrary.shared().performChanges({
      PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL)
    }) { saved, error in
      // Present UI on the main queue; this completion handler may run on a background queue.
      DispatchQueue.main.async {
        let success = saved && (error == nil)
        let title = success ? "Success" : "Error"
        let message = success ? "Video saved" : "Failed to save video"
        let alert = UIAlertController(title: title, message: message, preferredStyle: .alert)
        alert.addAction(UIAlertAction(title: "OK", style: UIAlertAction.Style.cancel, handler: nil))
        self.present(alert, animated: true, completion: nil)
      }
    }
  }
  // Ensure permission to access Photo Library
  if PHPhotoLibrary.authorizationStatus() != .authorized {
    PHPhotoLibrary.requestAuthorization { status in
      if status == .authorized {
        saveVideoToPhotos()
      }
    }
  } else {
    saveVideoToPhotos()
  }
}
static func videoCompositionInstruction1(_ track: AVCompositionTrack, asset: AVAsset)
    -> AVMutableVideoCompositionLayerInstruction {
  let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
  let assetTrack = asset.tracks(withMediaType: .video)[0]
  let transform = assetTrack.preferredTransform
  let assetInfo = orientationFromTransform(transform)
  var scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.width
  if assetInfo.isPortrait { // not hit
    scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
    let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
    instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor), at: CMTime.zero)
  } else { // hit
    let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
    var concat = assetTrack.preferredTransform.concatenating(scaleFactor)
      .concatenating(CGAffineTransform(translationX: 0, y: UIScreen.main.bounds.width / 4))
    if assetInfo.orientation == .down { // not hit
      let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
      let windowBounds = UIScreen.main.bounds
      let yFix = assetTrack.naturalSize.height + windowBounds.height
      let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: yFix)
      concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
    }
    instruction.setTransform(concat, at: CMTime.zero)
  }
  return instruction
}
static func orientationFromTransform(_ transform: CGAffineTransform)
    -> (orientation: UIImage.Orientation, isPortrait: Bool) {
  var assetOrientation = UIImage.Orientation.up
  var isPortrait = false
  if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
    assetOrientation = .right
    isPortrait = true
  } else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
    assetOrientation = .left
    isPortrait = true
  } else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
    assetOrientation = .up
  } else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
    assetOrientation = .down
  }
  return (assetOrientation, isPortrait)
}
You are setting

layercomposition.renderSize = CGSize(width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)

when it should be

layercomposition.renderSize = yourAsset.tracks(withMediaType: .video)[0].naturalSize

The first sets the output resolution to the screen size in points, not to the actual size of the original video. The second corrects this by rendering at the original video's resolution.

Think of it this way: you don't want your output resolution to be the size of the screen, which would be tiny compared to the footage. You want the size of the original video, or one of the standard video resolutions.
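One caveat with naturalSize: for portrait footage it reports the unrotated (landscape-first) dimensions, so it can be worth running it through preferredTransform before using it as the render size. A minimal sketch, with a hypothetical helper name of my choosing:

import AVFoundation
import CoreGraphics

// Hypothetical helper: derive the render size from the first video track,
// applying preferredTransform so portrait clips get width and height in the
// right order (naturalSize alone reports the unrotated dimensions).
func renderSize(for asset: AVAsset) -> CGSize {
  guard let track = asset.tracks(withMediaType: .video).first else { return .zero }
  let transformed = track.naturalSize.applying(track.preferredTransform)
  return CGSize(width: abs(transformed.width), height: abs(transformed.height))
}

// Applied to the question's merge3, the composition and layer frames would
// then be driven by the video's own size rather than the screen bounds:
// layercomposition.renderSize = renderSize(for: firstAsset)
// videolayer.frame = CGRect(origin: .zero, size: layercomposition.renderSize)
// parentlayer.frame = videolayer.frame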