A-Frame:如何将第二台摄像机渲染到 canvas

A-frame: How to render 2nd camera to canvas

我有一个场景有两个摄像头——一个主摄像头和一个平面视图摄像头。 如何将平面视图相机渲染到连接到主相机的平面?

我知道我需要按照 Aframe 文档中的说明将一个 draw-canvas 组件附加到平面上 - https://aframe.io/docs/0.5.0/components/material.html#canvas-textures。但这就是我所能得到的。

<script>
  // Component from the A-Frame docs: binds a <canvas> element (looked up by id)
  // so it can later be used as a material texture source.
  AFRAME.registerComponent('draw-canvas', {
  // Single-property schema: the value is the id of the target canvas element.
  schema: {default: ''},

  init: function () {
    // Cache the canvas and its 2D drawing context for later draw calls.
    this.canvas = document.getElementById(this.data);
    this.ctx = this.canvas.getContext('2d');

    // Draw on canvas...
   }
  });
</script>

是否有关于如何使用 Aframe 将相机渲染为 canvas 纹理的工作示例?

请看我的演示 - https://codepen.io/MannyMeadows/pen/OgxwGm

我找到了一个旁观者组件来解决这个问题...

/* Renders the entity's camera into a second WebGL canvas appended to a
   host element, throttled to an approximate target frame rate. */
AFRAME.registerComponent('spectator', {
  'schema': {
    // CSS selector of the element that will host the spectator canvas.
    canvas: {
      type: 'string',
      default: ''
    },
    // Desired FPS of the spectator display.
    // Fix: default was the string '10.0' although the type is number.
    fps: {
      type: 'number',
      default: 10.0
    }
  },
  'init': function() {
    var targetEl = document.querySelector(this.data.canvas);

    this.counter = 0;
    this.renderer = new THREE.WebGLRenderer({
      antialias: false,
      alpha: true
    });

    this.renderer.setPixelRatio(window.devicePixelRatio);
    // Square aspect ratio for the picture-in-picture view.
    this.el.object3DMap.camera.aspect = 1;
    this.el.object3DMap.camera.updateProjectionMatrix();
    this.renderer.setSize(targetEl.offsetWidth, targetEl.offsetHeight);

    // Creates the spectator canvas inside the host element.
    targetEl.appendChild(this.renderer.domElement);
  },
  'tick': function(time, timeDelta) {
    // Only render every Nth main-loop frame so the spectator view
    // runs close to the requested fps instead of the HMD rate.
    var loopFPS = 1000.0 / timeDelta;
    var hmdIsXFasterThanDesiredFPS = loopFPS / this.data.fps;
    // Fix: clamp to >= 1 — Math.round could yield 0 when the requested fps
    // exceeds the loop fps, making `counter % 0` NaN so nothing ever rendered.
    var renderEveryNthFrame = Math.max(1, Math.round(hmdIsXFasterThanDesiredFPS));

    if (this.counter % renderEveryNthFrame === 0) {
      // Fix: render() takes no parameters; the timeDelta argument was unused.
      this.render();
    }
    this.counter += 1;
  },
  'render': function() {
    this.renderer.render(this.el.sceneEl.object3D, this.el.object3DMap.camera);
  }
});
/* Fixed overlay box in the top-right corner that hosts the spectator
   canvas; z-index lifts it above the A-Frame scene canvas. */
.container {
  position: absolute;
  width: 300px;
  height: 300px;
  top: 10px;
  z-index: 3;
  right: 20px;
}
<!-- Demo page: main first-person camera plus a secondary "spectator" camera
     whose output is rendered into the #spectatorDiv overlay. -->
<html>

<head>
  <script src="https://aframe.io/releases/0.6.0/aframe.min.js"></script>
  <script src="//cdn.rawgit.com/donmccurdy/aframe-extras/v3.8.5/dist/aframe-extras.min.js"></script>
</head>

<body>
  <a-scene stats>
    <a-assets>
      <img id="map-tex" src="https://cdn0.iconfinder.com/data/icons/architecture-construction/128/1-512.png" crossorigin>
    </a-assets>
    <!-- Scene geometry -->
    <a-sphere position="0 1.25 -1" radius="1.25" color="#EF2D5E">
    </a-sphere>
    <a-box position="-1 0.5 1" rotation="0 45 0" width="1" height="1" depth="1" color="#4CC3D9"></a-box>
    <a-cylinder position="1 0.75 1" radius="0.5" height="1.5" color="#FFC65D"></a-cylinder>
    <a-plane rotation="-90 0 0" width="4" height="4" color="#7BC8A4"></a-plane>
    <a-sky color="#ECECEC"></a-sky>
    <!-- 1st person camera -->
    <a-entity>
      <a-camera id="primaryCamera" position="0 0 3">
      </a-camera>
      <!-- Inactive overhead camera carrying a marker sphere -->
      <a-camera position="0 20 3" active="false">
        <a-sphere id="icon" position="0 0 0" radius="0.1" color="#EF2D5E"></a-sphere>
      </a-camera>
    </a-entity>
    <!-- spectator camera: looks straight down from high above; the spectator
         component renders it into the #spectatorDiv overlay element -->
    <a-entity camera="active:false; fov:1; far:280" spectator="canvas:#spectatorDiv;" active="false" look-controls="enabled: false" wasd-controls="enabled: false" id="secondaryCamera" position="0 300 0" rotation="-90 0 0">
    </a-entity>
    <!-- mini-map -->
    <a-plane position="0 21 0" rotation="-90 0 0" width="4" height="4" src="#map-tex"></a-plane>
  </a-scene>
  <!-- Host element for the spectator renderer's canvas (styled by .container) -->
  <div class="container" id="spectatorDiv">
  </div>
</body>

</html>

我写了一个组件来帮助解决这个问题。 https://github.com/diarmidmackenzie/aframe-multi-camera

下面是一个将场景的平面图渲染到相机前面固定位置的平面的示例。

<!-- Demo for aframe-multi-camera: a secondary plan-view camera is rendered
     onto a plane that is parented to the main camera (fixed HUD position). -->
<!DOCTYPE html>
<html>
  <head>
      <script src="https://aframe.io/releases/1.2.0/aframe.min.js"></script>
      <script src="https://cdn.jsdelivr.net/gh/diarmidmackenzie/aframe-multi-camera@latest/src/multi-camera.min.js"></script>  
  </head>
  <body>
      <a-scene>
        <!-- arbitrary texture used to initialize material on the target canvas-->
        <img id="test-texture" src="https://cdn.aframe.io/examples/ui/kazetachinu.jpg" crossorigin="anonymous"/>
        <a-camera>
          <!-- this is the plane that the plan-camera is rendered to-->
          <!-- child of main camera so that it stays in a fixed position-->
          <a-plane id="texture-plane1" position="0.4 0.4 -1" rotation="0 0 0" width="0.3" height="0.3"
                   src='#test-texture'></a-plane>
        </a-camera>
        
        <!-- some scene content-->        
        <a-entity id="container" position = "0 0 -6">
          <a-box position="-1 0.5 1" rotation="0 0 0" color="#4CC3D9"
                 animation="property: rotation; to: 0 360 0; loop: true; dur: 3000; easing: linear"></a-box>
          <a-sphere position="0 1.25 -1" radius="1.25" color="#EF2D5E"></a-sphere>
          <a-cylinder position="1 0.75 1" radius="0.5" height="1.5" color="#FFC65D"
                      animation="property: rotation; to: 360 0 0; loop: true; dur: 3000; easing: linear"></a-cylinder>
          <a-plane position="0 0 0" rotation="-90 0 0" width="4" height="4" color="#7BC8A4" side="double"></a-plane>
        </a-entity>
                        

        <!-- top-down camera rendered to #texture-plane1 before the main render -->
        <a-entity id="plan-camera" secondary-camera="output:plane; outputElement:#texture-plane1; sequence:before"
                  position="0 9 -6" rotation="-90 0 0">
        </a-entity>        

        <a-sky color="#ECECEC"></a-sky>

      </a-scene>
  </body>
</html>

这里还有一个 Glitch 在线演示:https://glitch.com/edit/#!/rattle-ruby-badger

还有人建议我在此处发布 multi-camera 组件的完整源代码。

这里是...

/* System that supports capture of the the main A-Frame render() call
   by add-render-call */
AFRAME.registerSystem('add-render-call', {

  /* Hijacks the renderer's render() so that extra per-frame render calls
     can be run before/after (or instead of) the default scene render. */
  init() {

    this.render = this.render.bind(this);
    // Keep a reference to the original render so it can still be invoked.
    this.originalRender = this.el.sceneEl.renderer.render;
    this.el.sceneEl.renderer.render = this.render;
    // Secondary cameras manage clearing themselves.
    this.el.sceneEl.renderer.autoClear = false;

    this.preRenderCalls = [];
    this.postRenderCalls = [];
    // How many components currently want the default render suppressed.
    // (Fix: property name was misspelled "suppresssDefaultRenderCount".)
    this.suppressDefaultRenderCount = 0;
  },

  addPreRenderCall(render) {
    this.preRenderCalls.push(render);
  },

  removePreRenderCall(render) {
    const index = this.preRenderCalls.indexOf(render);
    if (index > -1) {
      this.preRenderCalls.splice(index, 1);
    }
  },

  addPostRenderCall(render) {
    this.postRenderCalls.push(render);
  },

  removePostRenderCall(render) {
    const index = this.postRenderCalls.indexOf(render);
    if (index > -1) {
      this.postRenderCalls.splice(index, 1);
    }
    else {
      console.warn("Unexpected failure to remove render call");
    }
  },

  suppressOriginalRender() {
    this.suppressDefaultRenderCount++;
  },

  unsuppressOriginalRender() {
    this.suppressDefaultRenderCount--;

    if (this.suppressDefaultRenderCount < 0) {
      console.warn("Unexpected unsuppression of original render");
      this.suppressDefaultRenderCount = 0;
    }
  },

  /* Replacement for renderer.render(): runs registered pre-render calls,
     the (possibly suppressed) default render, then post-render calls. */
  render(scene, camera) {

    // Fix: `renderer` was assigned without const, creating an implicit global.
    const renderer = this.el.sceneEl.renderer;

    // set up THREE.js stats to correctly count across all render calls.
    renderer.info.autoReset = false;
    renderer.info.reset();

    this.preRenderCalls.forEach((f) => f());

    if (this.suppressDefaultRenderCount <= 0) {
      this.originalRender.call(renderer, scene, camera);
    }

    this.postRenderCalls.forEach((f) => f());
  }
});

/* Component that captures the main A-Frame render() call
   and adds an additional render call.
   Must specify an entity and component that expose a function call render(). */
/* Component that captures the main A-Frame render() call
   and adds an additional render call.
   Must specify an entity and component that expose a function call render(). */
AFRAME.registerComponent('add-render-call', {

  multiple: true,

  schema: {
    entity: {type: 'selector'},
    componentName: {type: 'string'},
    sequence: {type: 'string', oneOf: ['before', 'after', 'replace'], default: 'after'}
  },

  init() {
    // Bind once so the identical function reference can be unregistered later.
    this.invokeRender = this.invokeRender.bind(this);
  },

  update(oldData) {
    // Unhook whatever the previous settings registered, then hook up the new ones.
    this.removeSettings(oldData);

    const sequence = this.data.sequence;

    if (sequence === "before") {
      this.system.addPreRenderCall(this.invokeRender);
    }

    if (sequence === "replace") {
      this.system.suppressOriginalRender();
    }

    if (sequence === "after" || sequence === "replace") {
      this.system.addPostRenderCall(this.invokeRender);
    }
  },

  remove() {
    this.removeSettings(this.data);
  },

  // Undo the registrations implied by a given data snapshot.
  removeSettings(data) {
    switch (data.sequence) {
      case "before":
        this.system.removePreRenderCall(this.invokeRender);
        break;

      case "replace":
        this.system.unsuppressOriginalRender();
        this.system.removePostRenderCall(this.invokeRender);
        break;

      case "after":
        this.system.removePostRenderCall(this.invokeRender);
        break;
    }
  },

  // Forwards the render call to the configured component on the target entity.
  invokeRender() {
    const target = this.data.entity;
    const component = target && target.components[this.data.componentName];

    if (component) {
      component.render(this.el.sceneEl.renderer, this.system.originalRender);
    }
  }
});

/* Component to set layers via HTML attribute. */
/* Component to set layers via HTML attribute. */
AFRAME.registerComponent('layers', {
    schema : {type: 'number', default: 0},

    init: function() {

        // Fix: was assigned without const/var, leaking an implicit global.
        // Recursively applies the layer to an object and all descendants,
        // unless the owning entity opts out via `keep-default-layer`.
        const setObjectLayer = function(object, layer) {
            if (!object.el ||
                !object.el.hasAttribute('keep-default-layer')) {
                object.layers.set(layer);
            }
            object.children.forEach(o => setObjectLayer(o, layer));
        };

        this.el.addEventListener("loaded", () => {
            setObjectLayer(this.el.object3D, this.data);
        });

        // Text meshes are created asynchronously; re-apply when the font loads.
        if (this.el.hasAttribute('text')) {
            this.el.addEventListener("textfontset", () => {
                setObjectLayer(this.el.object3D, this.data);
            });
        }
    }
});

/* This component has code in common with viewpoint-selector-renderer
   However it's a completely generic stripped-down version, which
   just delivers the 2nd camera function.
   i.e. it is missing:
   - The positioning of the viewpoint-selector entity.
   - The cursor / raycaster elements.
*/

/* Generic second-camera component: renders the entity's viewpoint either to
   a screen-space HTML element or to a plane's material texture, by hooking
   into the add-render-call system. */
AFRAME.registerComponent('secondary-camera', {
    schema: {
        output: {type: 'string', oneOf: ['screen', 'plane'], default: 'screen'},
        outputElement: {type: 'selector'},
        // Fix: oneOf entries were single comma-joined strings
        // (['perspective, orthographic'], ['high, low']) rather than arrays of options.
        cameraType: {type: 'string', oneOf: ['perspective', 'orthographic'], default: 'perspective'},
        sequence: {type: 'string', oneOf: ['before', 'after', 'replace'], default: 'after'},
        quality: {type: 'string', oneOf: ['high', 'low'], default: 'high'}
    },

    init() {

        if (!this.el.id) {
          console.error("No id specified on entity.  secondary-camera only works on entities with an id");
        }

        this.savedViewport = new THREE.Vector4();
        this.sceneInfo = this.prepareScene();
        this.activeRenderTarget = 0;

        // add the render call to the scene
        this.el.sceneEl.setAttribute(`add-render-call__${this.el.id}`,
                                     {entity: `#${this.el.id}`,
                                      componentName: "secondary-camera",
                                      sequence: this.data.sequence});

        // if there is a cursor on this entity, set it up to read this camera.
        if (this.el.hasAttribute('cursor')) {
          this.el.setAttribute("cursor", "canvas: user; camera: user");

          this.el.addEventListener('loaded', () => {
                // Raycast only against objects on this camera's layers.
                this.el.components['raycaster'].raycaster.layers.mask = this.el.object3D.layers.mask;

                // Re-point the cursor at this camera/canvas rather than the defaults.
                const cursor = this.el.components['cursor'];
                cursor.removeEventListeners();
                cursor.camera = this.camera;
                cursor.canvas = this.data.outputElement;
                cursor.canvasBounds = cursor.canvas.getBoundingClientRect();
                cursor.addEventListeners();
                cursor.updateMouseEventListeners();
            });
        }

        if (this.data.output === 'plane') {
          // Geometry dimensions are needed, so wait for the plane to load.
          if (!this.data.outputElement.hasLoaded) {
            this.data.outputElement.addEventListener("loaded", () => {
              this.configureCameraToPlane();
            });
          } else {
            this.configureCameraToPlane();
          }
        }
    },

    /* Sets up the double-buffered render targets and matches the camera
       aspect ratio to the target plane's geometry. */
    configureCameraToPlane() {
      const object = this.data.outputElement.getObject3D('mesh');
      function nearestPowerOf2(n) {
        return 1 << 31 - Math.clz32(n);
      }
      // 2 * nearest power of 2 gives a nice look, but at a perf cost.
      const factor = (this.data.quality === 'high') ? 2 : 1;

      const width = factor * nearestPowerOf2(window.innerWidth * window.devicePixelRatio);
      const height = factor * nearestPowerOf2(window.innerHeight * window.devicePixelRatio);

      function newRenderTarget() {
        const target = new THREE.WebGLRenderTarget(width,
                                                   height,
                                                   {
                                                      minFilter: THREE.LinearFilter,
                                                      magFilter: THREE.LinearFilter,
                                                      stencilBuffer: false,
                                                      generateMipmaps: false
                                                    });

         return target;
      }
      // We use 2 render targets, and alternate each frame, so that we are
      // never rendering to a target that is actually in front of the camera.
      this.renderTargets = [newRenderTarget(),
                            newRenderTarget()];

      this.camera.aspect = object.geometry.parameters.width /
                           object.geometry.parameters.height;

    },

    remove() {

      this.el.sceneEl.removeAttribute(`add-render-call__${this.el.id}`);
      if (this.renderTargets) {
        this.renderTargets[0].dispose();
        this.renderTargets[1].dispose();
      }

      // "Remove" code does not tidy up adjustments made to cursor component.
      // rarely necessary as cursor is typically put in place at the same time
      // as the secondary camera, and so will be disposed of at the same time.
    },

    /* Creates the THREE camera (perspective or orthographic) and adds it
       directly to the scene graph. */
    prepareScene() {
        this.scene = this.el.sceneEl.object3D;

        const width = 2;
        const height = 2;

        if (this.data.cameraType === "orthographic") {
            this.camera = new THREE.OrthographicCamera( width / - 2, width / 2, height / 2, height / - 2, 1, 1000 );
        }
        else {
            this.camera = new THREE.PerspectiveCamera( 45, width / height, 1, 1000);
        }

        this.scene.add(this.camera);
        return;
    },

    /* Invoked by the add-render-call component each frame; renders the scene
       from this entity's pose either to a screen viewport or to a texture. */
    render(renderer, renderFunction) {

        // don't bother rendering to screen in VR mode.
        if (this.data.output === "screen" && this.el.sceneEl.is('vr-mode')) return;

        var elemRect;

        if (this.data.output === "screen") {
           const elem = this.data.outputElement;

           // get the viewport relative position of this element
           elemRect = elem.getBoundingClientRect();
           this.camera.aspect = elemRect.width / elemRect.height;
        }

        // Camera position & layers match this entity.
        this.el.object3D.getWorldPosition(this.camera.position);
        this.el.object3D.getWorldQuaternion(this.camera.quaternion);
        this.camera.layers.mask = this.el.object3D.layers.mask;

        this.camera.updateProjectionMatrix();

        if (this.data.output === "screen") {
          // "bottom" position is relative to the whole viewport, not just the canvas.
          // We need to turn this into a distance from the bottom of the canvas.
          // We need to consider the header bar above the canvas, and the size of the canvas.
          const mainRect = renderer.domElement.getBoundingClientRect();

          renderer.getViewport(this.savedViewport);

          renderer.setViewport(elemRect.left - mainRect.left,
                               mainRect.bottom - elemRect.bottom,
                               elemRect.width,
                               elemRect.height);

          renderFunction.call(renderer, this.scene, this.camera);
          renderer.setViewport(this.savedViewport);
        }
        else {
          // target === "plane"

          // store off current renderer properties so that they can be restored.
          const currentRenderTarget = renderer.getRenderTarget();
          const currentXrEnabled = renderer.xr.enabled;
          const currentShadowAutoUpdate = renderer.shadowMap.autoUpdate;

          // temporarily override renderer properties for rendering to a texture.
          renderer.xr.enabled = false; // Avoid camera modification
          renderer.shadowMap.autoUpdate = false; // Avoid re-computing shadows

          const renderTarget = this.renderTargets[this.activeRenderTarget];
          renderTarget.texture.encoding = renderer.outputEncoding;
          renderer.setRenderTarget(renderTarget);
          renderer.state.buffers.depth.setMask( true ); // make sure the depth buffer is writable so it can be properly cleared, see #18897
          renderer.clear();

          renderFunction.call(renderer, this.scene, this.camera);

          this.data.outputElement.getObject3D('mesh').material.map = renderTarget.texture;

          // restore original renderer settings.
          renderer.setRenderTarget(currentRenderTarget);
          renderer.xr.enabled = currentXrEnabled;
          renderer.shadowMap.autoUpdate = currentShadowAutoUpdate;

          // Swap buffers so next frame renders to the other target.
          this.activeRenderTarget = 1 - this.activeRenderTarget;
        }
    }
});