How to use scene camera with Agora.io in Unity

I have integrated Agora.io in Unity so that I can make video calls from my virtual reality application with an external user on a web page. The VR user can see the website user, but the website user cannot see the VR user, because there is no physical camera available to use. Is there a way to use the scene camera as the Agora video source? That would mean the website user would be able to see the VR user's world.

Yes. Although I haven't worked on a VR project before, the concept should be the same. You can use an external video source to send any video frame as if it came from a physical camera. For a scene camera, you can output the camera feed to a RenderTexture and extract the raw data from that RenderTexture. So the steps are:

  1. Set the camera to output to a RenderTexture (plus, if needed, the logic to display this RenderTexture somewhere locally); see the sketch below.
  2. When setting up the Agora RTC engine, also make sure to enable the external video source with this call:

    mRtcEngine.SetExternalVideoSource(true, false);

  3. On each frame, extract the raw image data from the RenderTexture.

  4. Send the raw frame data to the SDK function rtc.pushVideoFrame().

You can find the code for the last step here: https://gist.github.com/icywind/92053d0983e713515c64d5c532ebee21
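
A minimal sketch of steps 1 and 3, assuming a scene camera and a RenderTexture assigned in the Inspector (the names SceneCameraCapture, sceneCamera, renderTexture and localView are placeholders for this example, not part of the Agora SDK):

    using UnityEngine;
    using UnityEngine.UI;

    public class SceneCameraCapture : MonoBehaviour
    {
        // Placeholder references; assign these in the Inspector.
        public Camera sceneCamera;
        public RenderTexture renderTexture;
        public RawImage localView;   // optional local preview of the feed

        Texture2D readbackTexture;

        void Start()
        {
            // Step 1: route the scene camera's output into the RenderTexture.
            sceneCamera.targetTexture = renderTexture;

            // Optionally show the same RenderTexture somewhere locally.
            if (localView != null)
                localView.texture = renderTexture;

            readbackTexture = new Texture2D(renderTexture.width, renderTexture.height,
                                            TextureFormat.RGBA32, false);
        }

        // Step 3: copy the RenderTexture into a readable Texture2D and return its raw bytes.
        public byte[] GetFrameBytes()
        {
            RenderTexture previous = RenderTexture.active;
            RenderTexture.active = renderTexture;
            readbackTexture.ReadPixels(new Rect(0, 0, renderTexture.width, renderTexture.height), 0, 0);
            readbackTexture.Apply();
            RenderTexture.active = previous;

            // These bytes are what gets wrapped in an ExternalVideoFrame in step 4.
            return readbackTexture.GetRawTextureData();
        }
    }

Call GetFrameBytes() only after rendering has finished for the frame, for example from a coroutine that yields WaitForEndOfFrame(), and pass the returned bytes to the ExternalVideoFrame you push in step 4.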

I modified the screen-sharing code from the Agora.io tutorial so that it extracts a render texture instead. The problem is that when my render texture is the depth camera's video stream, I only see a white or black screen on the receiver side.

using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using agora_gaming_rtc;
using UnityEngine.UI;
using System.Globalization;
using System.Runtime.InteropServices;
using System;
public class ShareScreen : MonoBehaviour
{
    Texture2D mTexture;
    Rect mRect;
    [SerializeField]
    private string appId = "Your_AppID";
    [SerializeField]
    private string channelName = "agora";
    public IRtcEngine mRtcEngine;
    int i = 100;
    public RenderTexture depthMap;
    void Start()
    {
        Debug.Log("ScreenShare Activated");
        mRtcEngine = IRtcEngine.getEngine(appId);

        mRtcEngine.SetLogFilter(LOG_FILTER.DEBUG | LOG_FILTER.INFO | LOG_FILTER.WARNING | LOG_FILTER.ERROR | LOG_FILTER.CRITICAL);

        mRtcEngine.SetParameters("{\"rtc.log_filter\": 65535}");

        mRtcEngine.SetExternalVideoSource(true, false);

        mRtcEngine.EnableVideo();

        mRtcEngine.EnableVideoObserver();

        mRtcEngine.JoinChannel(channelName, null, 0);

        mRect = new Rect(0, 0, depthMap.width, depthMap.height);

        mTexture = new Texture2D((int)mRect.width, (int)mRect.height, TextureFormat.RGBA32, false);
    }
    void Update()
    {
        //Start the screen share coroutine
        StartCoroutine(shareScreen());
    }
    //Screen Share
    IEnumerator shareScreen()
    {
        yield return new WaitForEndOfFrame();
        //Make the render texture the active one so ReadPixels copies from it
        RenderTexture.active = depthMap;
        //Read the pixels inside the rectangle
        mTexture.ReadPixels(mRect, 0, 0);
        //Apply the pixels read from the rectangle to the texture
        mTexture.Apply();

        //Get the raw texture data from the texture as an array of bytes
        byte[] bytes = mTexture.GetRawTextureData();
        //Check to see if there is an engine instance already created
        IRtcEngine rtc = IRtcEngine.QueryEngine();
        //If the engine is present
        if (rtc != null)
        {
            //Create a new external video frame
            ExternalVideoFrame externalVideoFrame = new ExternalVideoFrame();
            //Set the buffer type of the video frame
            externalVideoFrame.type = ExternalVideoFrame.VIDEO_BUFFER_TYPE.VIDEO_BUFFER_RAW_DATA;
            //Set the video pixel format
            externalVideoFrame.format = ExternalVideoFrame.VIDEO_PIXEL_FORMAT.VIDEO_PIXEL_BGRA;
            //Apply the raw data pulled from the rectangle created earlier to the video frame
            externalVideoFrame.buffer = bytes;
            //Set the width of the video frame (in pixels)
            externalVideoFrame.stride = (int)mRect.width;
            //Set the height of the video frame
            externalVideoFrame.height = (int)mRect.height;
            //Remove pixels from the sides of the frame
            externalVideoFrame.cropLeft = 0;
            externalVideoFrame.cropTop = 0;
            externalVideoFrame.cropRight = 0;
            externalVideoFrame.cropBottom = 0;
            //Rotate the video frame (0, 90, 180, or 270)
            externalVideoFrame.rotation = 180;
            //Increment i and use it as the video timestamp
            externalVideoFrame.timestamp = i++;
            //Push the external video frame we just created
            int a = rtc.PushVideoFrame(externalVideoFrame);
            Debug.Log(" pushVideoFrame =       " + a);
        }
    }

}