iOS: trimming audio files with Swift?
I have to merge an audio file with a recorded voice. For example, the recorded voice is 47 seconds long, so I have to cut or trim a 4-minute audio song down to 47 seconds, and then merge the two audio files.
var url: NSURL?
if self.audioRecorder != nil {
    url = self.audioRecorder!.url
} else {
    url = self.soundFileURL!
}
print("playing \(url)")
do {
    self.newplayer = try AVPlayer(URL: url!)
    let avAsset = AVURLAsset(URL: url!, options: nil)
    print("\(avAsset)")
    let audioDuration = avAsset.duration
    let totalSeconds = CMTimeGetSeconds(audioDuration)
    let hours = floor(totalSeconds / 3600)
    let minutes = floor(totalSeconds % 3600 / 60)
    let seconds = floor(totalSeconds % 3600 % 60)
    print("hours = \(hours), minutes = \(minutes), seconds = \(seconds)")
}
This is the output: // hours = 0.0, minutes = 0.0, seconds = 42.0
For the trim method, I just tried the code below. How do I set the exact duration, start time, and end time, and the new URL?
func exportAsset(asset: AVAsset, fileName: String) {
    let documentsDirectory = NSFileManager.defaultManager().URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)[0]
    let trimmedSoundFileURL = documentsDirectory.URLByAppendingPathComponent(fileName)
    print("saving to \(trimmedSoundFileURL!.absoluteString)")

    let filemanager = NSFileManager.defaultManager()
    // Note: fileExistsAtPath expects a filesystem path, not an absolute URL string.
    if filemanager.fileExistsAtPath(trimmedSoundFileURL!.path!) {
        print("sound exists")
    }

    let exporter = AVAssetExportSession(asset: asset, presetName: AVAssetExportPresetAppleM4A)
    exporter!.outputFileType = AVFileTypeAppleM4A
    exporter!.outputURL = trimmedSoundFileURL

    let duration = CMTimeGetSeconds(asset.duration)
    if duration < 5.0 {
        print("sound is not long enough")
        return
    }

    // e.g. the first 5 seconds
    let startTime = CMTimeMake(0, 1)
    let stopTime = CMTimeMake(5, 1)
    let exportTimeRange = CMTimeRangeFromTimeToTime(startTime, stopTime)
    exporter!.timeRange = exportTimeRange

    // do it
    exporter!.exportAsynchronouslyWithCompletionHandler({
        switch exporter!.status {
        case AVAssetExportSessionStatus.Failed:
            print("export failed \(exporter!.error)")
        case AVAssetExportSessionStatus.Cancelled:
            print("export cancelled \(exporter!.error)")
        default:
            print("export complete")
        }
    })
}
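For an exact range, one option (a sketch in the same Swift 2.3 style, not tested here) is to derive the stop time from the recorded voice's duration rather than hard-coding 5 seconds; `voiceAsset` is an assumed reference to the recording's AVAsset:

// Sketch (assumed `voiceAsset`): trim to exactly the recorded voice's length, e.g. 47 s.
let recordedSeconds = CMTimeGetSeconds(voiceAsset.duration)
let startTime = kCMTimeZero
// 600 is a conventional preferred timescale for precise cut points.
let stopTime = CMTimeMakeWithSeconds(recordedSeconds, 600)
exporter!.timeRange = CMTimeRangeFromTimeToTime(startTime, stopTime)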
I finally found the answer to my question, and it works fine. I have attached the code below; I added the audio-trimming code to it. It may be useful for anyone trying to merge and trim audio (Swift 2.3):
func mixAudio() {
    let currentTime = CFAbsoluteTimeGetCurrent()
    let composition = AVMutableComposition()

    // First composition track: the recorded voice.
    let compositionAudioTrack = composition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
    compositionAudioTrack.preferredVolume = 0.8
    let avAsset = AVURLAsset.init(URL: soundFileURL, options: nil)
    print("\(avAsset)")
    let tracks = avAsset.tracksWithMediaType(AVMediaTypeAudio)
    let clipAudioTrack = tracks[0]
    do {
        try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, avAsset.duration), ofTrack: clipAudioTrack, atTime: kCMTimeZero)
    } catch _ {
    }

    // Second composition track: the song. Both tracks start at kCMTimeZero, so they play together (mix).
    let compositionAudioTrack1 = composition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
    compositionAudioTrack1.preferredVolume = 0.8
    let avAsset1 = AVURLAsset.init(URL: soundFileURL1)
    print(avAsset1)
    let tracks1 = avAsset1.tracksWithMediaType(AVMediaTypeAudio)
    let clipAudioTrack1 = tracks1[0]
    do {
        try compositionAudioTrack1.insertTimeRange(CMTimeRangeMake(kCMTimeZero, avAsset1.duration), ofTrack: clipAudioTrack1, atTime: kCMTimeZero)
    } catch _ {
    }

    // Output file: Library/Fav.m4a
    let paths = NSSearchPathForDirectoriesInDomains(.LibraryDirectory, .UserDomainMask, true)
    let CachesDirectory = paths[0]
    let strOutputFilePath = CachesDirectory.stringByAppendingString("/Fav")
    print(" strOutputFilePath is \n \(strOutputFilePath)")
    let requiredOutputPath = CachesDirectory.stringByAppendingString("/Fav.m4a")
    print(" requiredOutputPath is \n \(requiredOutputPath)")
    soundFile1 = NSURL.fileURLWithPath(requiredOutputPath)
    print(" Output path is \n \(soundFile1)")

    // The recorded voice's length, in whole seconds, becomes the trim length.
    let audioDuration = avAsset.duration
    let totalSeconds = CMTimeGetSeconds(audioDuration)
    let hours = floor(totalSeconds / 3600)
    let minutes = floor(totalSeconds % 3600 / 60)
    let seconds = Int64(totalSeconds % 3600 % 60)
    print("hours = \(hours), minutes = \(minutes), seconds = \(seconds)")

    let recordSettings: [String : AnyObject] = [
        AVFormatIDKey: Int(kAudioFormatMPEG4AAC),
        AVSampleRateKey: 12000,
        AVNumberOfChannelsKey: 1,
        AVEncoderAudioQualityKey: AVAudioQuality.Low.rawValue
    ]
    do {
        audioRecorder = try AVAudioRecorder(URL: soundFile1, settings: recordSettings)
        audioRecorder!.delegate = self
        audioRecorder!.meteringEnabled = true
        audioRecorder!.prepareToRecord()
    } catch let error as NSError {
        audioRecorder = nil
        print(error.localizedDescription)
    }

    // Remove any previous output so the exporter can write to this URL.
    do {
        try NSFileManager.defaultManager().removeItemAtURL(soundFile1)
    } catch _ {
    }

    let exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetAppleM4A)
    exporter!.outputURL = soundFile1
    exporter!.outputFileType = AVFileTypeAppleM4A

    let duration = CMTimeGetSeconds(avAsset1.duration)
    print(duration)
    if duration < 5.0 {
        print("sound is not long enough")
        return
    }

    // The trim: export only the first `seconds` seconds of the mix (the voice's length).
    let startTime = CMTimeMake(0, 1)
    let stopTime = CMTimeMake(seconds, 1)
    let exportTimeRange = CMTimeRangeFromTimeToTime(startTime, stopTime)
    print(exportTimeRange)
    exporter!.timeRange = exportTimeRange
    print(exporter!.timeRange)

    exporter!.exportAsynchronouslyWithCompletionHandler { () -> Void in
        print(" Output path is \n \(requiredOutputPath)")
        print("export complete: \(CFAbsoluteTimeGetCurrent() - currentTime)")

        var url: NSURL?
        if self.audioRecorder != nil {
            url = self.audioRecorder!.url
        } else {
            url = self.soundFile1!
            print(url)
        }
        print("playing \(url)")

        do {
            print(self.soundFile1)
            print(" Output path is \n \(requiredOutputPath)")
            self.setSessionPlayback()
            // Base64-encode the trimmed file and upload it.
            do {
                self.optData = try NSData(contentsOfURL: self.soundFile1!, options: NSDataReadingOptions.DataReadingMappedIfSafe)
                print(self.optData)
                self.recordencryption = self.optData.base64EncodedStringWithOptions(NSDataBase64EncodingOptions())
                // print(self.recordencryption)
                self.myImageUploadRequest()
            }
            // Play back the result.
            self.wasteplayer = try AVAudioPlayer(contentsOfURL: self.soundFile1)
            self.wasteplayer.numberOfLoops = 0
            self.wasteplayer.play()
        } catch _ {
        }
    }
}
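For readers on current Swift, here is a minimal modern sketch of the same merge-then-trim idea. The function name, parameters, and file URLs are assumptions, not part of the original answer; error handling is reduced to prints.

import AVFoundation

func mergeAndTrim(voiceURL: URL, songURL: URL, outputURL: URL, seconds: Double) {
    let composition = AVMutableComposition()
    let voiceAsset = AVURLAsset(url: voiceURL)
    let songAsset = AVURLAsset(url: songURL)

    // One composition track per source; both start at .zero so they overlap (mix).
    for asset in [voiceAsset, songAsset] {
        guard let sourceTrack = asset.tracks(withMediaType: .audio).first,
              let track = composition.addMutableTrack(withMediaType: .audio,
                                                      preferredTrackID: kCMPersistentTrackID_Invalid)
        else { continue }
        try? track.insertTimeRange(CMTimeRange(start: .zero, duration: asset.duration),
                                   of: sourceTrack, at: .zero)
    }

    guard let exporter = AVAssetExportSession(asset: composition,
                                              presetName: AVAssetExportPresetAppleM4A) else { return }
    exporter.outputURL = outputURL
    exporter.outputFileType = .m4a
    // The trim: export only the first `seconds` seconds of the mixed composition.
    exporter.timeRange = CMTimeRange(start: .zero,
                                     duration: CMTimeMakeWithSeconds(seconds, preferredTimescale: 600))
    exporter.exportAsynchronously {
        print(exporter.status == .completed
              ? "export complete"
              : "export failed: \(String(describing: exporter.error))")
    }
}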
A simple sound-trimming function based on the previous answer (modern Swift):
static func trimmSound(inUrl: URL, outUrl: URL, timeRange: CMTimeRange, callBack: @escaping () -> Void) {
    let startTime = timeRange.start
    let duration = timeRange.duration
    let audioAsset = AVAsset(url: inUrl)

    // Copy the requested range of the source track into a new composition.
    let composition = AVMutableComposition()
    let compositionAudioTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
    let sourceAudioTrack = audioAsset.tracks(withMediaType: AVMediaType.audio).first!
    do {
        try compositionAudioTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration), of: sourceAudioTrack, at: .zero)
    } catch {
        print(error.localizedDescription)
        return
    }

    // Export the composition as an .m4a file and call back on the main queue.
    let exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetAppleM4A)
    exporter!.outputURL = outUrl
    exporter!.outputFileType = AVFileType.m4a
    exporter!.shouldOptimizeForNetworkUse = true
    exporter!.exportAsynchronously {
        DispatchQueue.main.async {
            callBack()
        }
    }
}
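A hypothetical call site; the file names and the 47-second range come from the question's example, not from the original answer:

// Hypothetical usage: trim the first 47 seconds of "song.m4a" into "trimmed.m4a".
// (Prefix the call with the enclosing type's name if calling from outside it.)
let docs = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let inUrl = docs.appendingPathComponent("song.m4a")       // assumed input file
let outUrl = docs.appendingPathComponent("trimmed.m4a")   // assumed output file
let range = CMTimeRange(start: .zero,
                        duration: CMTimeMakeWithSeconds(47, preferredTimescale: 600))
trimmSound(inUrl: inUrl, outUrl: outUrl, timeRange: range) {
    print("trim finished, file at \(outUrl)")
}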