How to combine audio and video in Android
I can record audio from the microphone and save it as an .mp3 file using the code below. I am trying to combine this audio with the video data and play them together. How can I do that?
File ses = new File(Environment.getExternalStorageDirectory().getAbsolutePath() + "/", "ses.mp3");
String path2 = String.valueOf(ses);

MediaRecorder recorder = new MediaRecorder();
recorder.setAudioSource(MediaRecorder.AudioSource.MIC);
// Note: despite the .mp3 file name, this writes AMR-NB audio in a 3GP container.
recorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
recorder.setOutputFile(path2);
try {
    recorder.prepare();
} catch (IOException e) {
    e.printStackTrace();
}
recorder.start();
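The recording also has to be stopped and released at some point before the file can be merged with anything. A minimal sketch of that step (not in the original code):

// Later, when recording should end (e.g. when video capture stops):
recorder.stop();
recorder.release();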
I can also convert the NV21 byte data from the camera to .h264 and play the video:
private CameraProxy.CameraDataCallBack callBack = new CameraProxy.CameraDataCallBack() {
    @Override
    public void onDataBack(byte[] data, long length) {
        encode(data);
    }
};
The encoding process for the video:
// Video format: H.264. Writes the raw encoded stream to a FileOutputStream (fos).
private synchronized void encode(byte[] data) {
    ByteBuffer[] inputBuffers = mMediaCodec.getInputBuffers();
    ByteBuffer[] outputBuffers = mMediaCodec.getOutputBuffers();

    // Feed the NV21 frame to the encoder. presentationTimeUs is left at 0 here,
    // so the output stream carries no usable timestamps.
    int inputBufferIndex = mMediaCodec.dequeueInputBuffer(-1);
    if (inputBufferIndex >= 0) {
        ByteBuffer inputBuffer = inputBuffers[inputBufferIndex];
        inputBuffer.clear();
        inputBuffer.put(data);
        mMediaCodec.queueInputBuffer(inputBufferIndex, 0, data.length, 0, 0);
    } else {
        return;
    }

    // Drain all available output buffers and append them to the file.
    MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
    int outputBufferIndex = mMediaCodec.dequeueOutputBuffer(bufferInfo, 0);
    Log.i(TAG, "outputBufferIndex-->" + outputBufferIndex);
    do {
        if (outputBufferIndex >= 0) {
            ByteBuffer outBuffer = outputBuffers[outputBufferIndex];
            Log.i(TAG, "buffer info-->" + bufferInfo.offset + "--"
                    + bufferInfo.size + "--" + bufferInfo.flags + "--"
                    + bufferInfo.presentationTimeUs);
            byte[] outData = new byte[bufferInfo.size];
            outBuffer.get(outData);
            try {
                if (bufferInfo.offset != 0) {
                    fos.write(outData, bufferInfo.offset, outData.length - bufferInfo.offset);
                } else {
                    fos.write(outData, 0, outData.length);
                }
                fos.flush();
                Log.i(TAG, "out data -- > " + outData.length);
                mMediaCodec.releaseOutputBuffer(outputBufferIndex, false);
                outputBufferIndex = mMediaCodec.dequeueOutputBuffer(bufferInfo, 0);
            } catch (IOException e) {
                e.printStackTrace();
            }
        } else if (outputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
            outputBuffers = mMediaCodec.getOutputBuffers();
        } else if (outputBufferIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
            MediaFormat format = mMediaCodec.getOutputFormat();
        }
    } while (outputBufferIndex >= 0);
}
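One thing worth noting for a later merge: the code above queues every frame with presentationTimeUs set to 0, so the raw .h264 output carries no usable timing. A minimal sketch (the field and method names are my own, not from the original code) of how per-frame timestamps could be generated:

// Hypothetical helper: derive a presentation timestamp from a frame counter,
// assuming a fixed capture rate. Pass the result to queueInputBuffer(...)
// instead of the hard-coded 0 above.
private long frameIndex = 0;
private static final int FRAME_RATE = 30; // assumed camera frame rate

private long nextPresentationTimeUs() {
    return frameIndex++ * 1_000_000L / FRAME_RATE;
}

// e.g. mMediaCodec.queueInputBuffer(inputBufferIndex, 0, data.length, nextPresentationTimeUs(), 0);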
For this you can use a library for Android called FFmpeg. It takes its parameters as a command line and can process virtually any video and audio. You may need to read some of the FFmpeg Android documentation. I used this library to add a watermark to videos: it splits the video into frames and adds the watermark to them. It does the job very well, and I also tested it for combining audio and it was helpful.
Here is a sample of the code I used.
ffmpeg.loadBinary(new LoadBinaryResponseHandler() {
@Override
public void onStart() {
}
@Override
public void onFailure() {
}
@Override
public void onSuccess() {
final String fileP = lipModel.filePath;
String[] cmd = {"-i", lipModel.filePath, "-i", imagePath, "-preset", "ultrafast", "-filter_complex", "[1:v]scale="+width*0.21+":"+height*0.35+" [ovrl],[0:v][ovrl] overlay=x=(main_w-overlay_w):y=(main_h-overlay_h)", outputPath};
try {
// to execute "ffmpeg -version" command you just need to pass "-version"
ffmpeg.execute(cmd, new ExecuteBinaryResponseHandler() {
@Override
public void onStart() {
}
@Override
public void onProgress(String message) {
Log.d(TAG, "onProgress: " + message);
}
@Override
public void onFailure(String message) {
Log.d(TAG, "onFailure: " + message);
}
@Override
public void onSuccess(String message) {
Log.d(TAG, "onSuccess: " + message);
new AsyncDispatcher(new IAsync() {
@Override
public void IOnPreExecute() {
}
@Override
public Object IdoInBackGround(Object... params) {
File file = new File(lipModel.filePath);
if (file.exists()) {
file.delete();
}
lipModel.filePath = outputPath;
lipModel.contentUri = Uri.parse(new File(lipModel.filePath).toString()).toString();
lipSyncSerializedModel.lipSyncMap.put(lipModel.uniqueName, lipModel);
ObjectSerializer.getInstance(getApplicationContext()).serialize(SerTag.LIP_HISTORy, lipSyncSerializedModel);
HomeActivity.this.runOnUiThread(new Runnable() {
@Override
public void run() {
if (LipSyncFragment.iOnNewDataAddedRef != null) {
LipSyncFragment.iOnNewDataAddedRef.newDataAdded();
// historyFragment.favModel = favModel;
}
LipsyncHistoryFragment lipHistory = new LipsyncHistoryFragment();
File file = new File(fileP);
if (file != null) {
if(file.exists()){
file.delete();
Log.d(TAG, "run: Deleted the Orignal Video");
}
}
new FragmentUtils(HomeActivity.this,
lipHistory, R.id.fragContainer);
}
});
return null;
}
@Override
public void IOnPostExecute(Object result) {
}
});
}
@Override
public void onFinish() {
}
});
        } catch (FFmpegCommandAlreadyRunningException e) {
            // Handle the case where an FFmpeg command is already running
            e.printStackTrace();
        }
    }
});
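Since the actual goal here is merging an audio file with a video file rather than watermarking, the command passed to the same ffmpeg.execute(...) call would look roughly like the sketch below. The paths and the output name are placeholders; -c:v copy keeps the video stream untouched and the audio is re-encoded to AAC so it fits an MP4 container.

// Hedged sketch: merge audioPath (e.g. the recorded 3GP/AMR file) into videoPath.
// videoPath, audioPath and mergedOutputPath are placeholders, not from the original code.
String[] mergeCmd = {
        "-i", videoPath,
        "-i", audioPath,
        "-c:v", "copy",      // keep the video stream as-is
        "-c:a", "aac",       // re-encode audio to AAC for the MP4 container
        "-map", "0:v:0",     // video from the first input
        "-map", "1:a:0",     // audio from the second input
        "-shortest",         // stop when the shorter stream ends
        mergedOutputPath
};
try {
    ffmpeg.execute(mergeCmd, new ExecuteBinaryResponseHandler() {
        @Override
        public void onSuccess(String message) {
            Log.d(TAG, "merge onSuccess: " + message);
        }

        @Override
        public void onFailure(String message) {
            Log.d(TAG, "merge onFailure: " + message);
        }
    });
} catch (FFmpegCommandAlreadyRunningException e) {
    e.printStackTrace();
}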
FFmpeg documentation: http://writingminds.github.io/ffmpeg-android-java/
FFmpeg library: https://github.com/writingminds/ffmpeg-android-java
There is another library that does the same thing:
public class Mp4ParserAudioMuxer implements AudioMuxer {
@Override
public boolean mux(String videoFile, String audioFile, String outputFile) {
Movie video;
try {
video = new MovieCreator().build(videoFile);
} catch (RuntimeException e) {
e.printStackTrace();
return false;
} catch (IOException e) {
e.printStackTrace();
return false;
}
Movie audio;
try {
audio = new MovieCreator().build(audioFile);
} catch (IOException e) {
e.printStackTrace();
return false;
} catch (NullPointerException e) {
e.printStackTrace();
return false;
}
Track audioTrack = audio.getTracks().get(0);
video.addTrack(audioTrack);
Container out = new DefaultMp4Builder().build(video);
FileOutputStream fos;
try {
fos = new FileOutputStream(outputFile);
} catch (FileNotFoundException e) {
e.printStackTrace();
return false;
}
BufferedWritableFileByteChannel byteBufferByteChannel =
new BufferedWritableFileByteChannel(fos);
try {
out.writeContainer(byteBufferByteChannel);
byteBufferByteChannel.close();
fos.close();
} catch (IOException e) {
e.printStackTrace();
return false;
}
return true;
}
}
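A call site might look like the snippet below (the paths are placeholders). Note that mp4parser reads ISO/MP4-style containers, so the AMR audio recorded above would likely need to be captured as AAC in an MP4/M4A container first for getTracks() to return an audio track it can mux.

// Hypothetical paths; both inputs must be containers mp4parser can parse (MP4/M4A).
boolean ok = new Mp4ParserAudioMuxer().mux(
        "/sdcard/video.mp4",
        "/sdcard/audio.m4a",
        "/sdcard/merged.mp4");
Log.d(TAG, "mux finished: " + ok);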
https://github.com/sannies/mp4parser
You can also try these, but it won't be as easy; you need to understand these APIs:
- MediaExtractor: extracts the data/tracks from a file.
- MediaCodec: if you want to encode/decode.
- MediaMuxer: muxes the tracks into an MP4 file.
You can use all three together or individually, depending on what you need. You can find some sample code here.
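A minimal sketch of the MediaExtractor + MediaMuxer route, assuming both inputs are already in containers the extractor can read (e.g. MP4/3GP) and no re-encoding is needed; the class and method names are illustrative, not from any of the code above:

import android.media.MediaCodec;
import android.media.MediaExtractor;
import android.media.MediaFormat;
import android.media.MediaMuxer;
import java.io.IOException;
import java.nio.ByteBuffer;

public class MuxerSample {
    // Copies the video track from videoPath and the audio track from audioPath
    // into a single MP4 at outputPath, without re-encoding either stream.
    public static void mux(String videoPath, String audioPath, String outputPath) throws IOException {
        MediaExtractor videoExtractor = new MediaExtractor();
        videoExtractor.setDataSource(videoPath);
        int videoTrack = selectTrack(videoExtractor, "video/");

        MediaExtractor audioExtractor = new MediaExtractor();
        audioExtractor.setDataSource(audioPath);
        int audioTrack = selectTrack(audioExtractor, "audio/");

        MediaMuxer muxer = new MediaMuxer(outputPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
        int outVideo = muxer.addTrack(videoExtractor.getTrackFormat(videoTrack));
        int outAudio = muxer.addTrack(audioExtractor.getTrackFormat(audioTrack));
        muxer.start();

        copySamples(videoExtractor, muxer, outVideo);
        copySamples(audioExtractor, muxer, outAudio);

        muxer.stop();
        muxer.release();
        videoExtractor.release();
        audioExtractor.release();
    }

    // Selects and returns the first track whose MIME type starts with the given prefix.
    private static int selectTrack(MediaExtractor extractor, String mimePrefix) {
        for (int i = 0; i < extractor.getTrackCount(); i++) {
            String mime = extractor.getTrackFormat(i).getString(MediaFormat.KEY_MIME);
            if (mime != null && mime.startsWith(mimePrefix)) {
                extractor.selectTrack(i);
                return i;
            }
        }
        throw new IllegalArgumentException("No " + mimePrefix + " track found");
    }

    // Copies every sample of the selected track, preserving its timestamp and flags.
    private static void copySamples(MediaExtractor extractor, MediaMuxer muxer, int outTrack) {
        ByteBuffer buffer = ByteBuffer.allocate(1 << 20); // 1 MiB; enlarge for bigger samples
        MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
        while (true) {
            info.size = extractor.readSampleData(buffer, 0);
            if (info.size < 0) break; // end of stream
            info.offset = 0;
            info.presentationTimeUs = extractor.getSampleTime();
            info.flags = extractor.getSampleFlags();
            muxer.writeSampleData(outTrack, buffer, info);
            extractor.advance();
        }
    }
}

The raw .h264 elementary stream produced by the encode() method above has no container, so it would first need to be wrapped (or written through MediaMuxer during encoding) before MediaExtractor can read it.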