How to implement textureCube using 6 sampler2Ds


Previously I used a samplerCube to render a cube.

Here is my previous fragment shader code:

"uniform samplerCube tCubeTgt;",
"varying vec3 posTgt;",
"void main() {",
    "vec4 reflectedColorTgt = textureCube( tCubeTgt, vec3( -posTgt.x, posTgt.yz ) );",
    "gl_FragColor = reflectedColorTgt ;",
"}"

Now I want to use 6 sampler2Ds to render the cube instead.

How do I do that?

Tags: glsl, webgl
1 Answer

Why?

In any case, looking at how cube mapping works, the formulas and table from the OpenGL ES 2.0 spec show how a cubemap lookup is computed:

s = 0.5 * (sc / |ma| + 1)
t = 0.5 * (tc / |ma| + 1)

where sc, tc, and ma come from this table:

Major Axis Direction|        Target             |sc |tc |ma |
--------------------+---------------------------+---+---+---+
       +rx          |TEXTURE_CUBE_MAP_POSITIVE_X|−rz|−ry| rx|
       −rx          |TEXTURE_CUBE_MAP_NEGATIVE_X| rz|−ry| rx|
       +ry          |TEXTURE_CUBE_MAP_POSITIVE_Y| rx| rz| ry|
       −ry          |TEXTURE_CUBE_MAP_NEGATIVE_Y| rx|−rz| ry|
       +rz          |TEXTURE_CUBE_MAP_POSITIVE_Z| rx|−ry| rz|
       −rz          |TEXTURE_CUBE_MAP_NEGATIVE_Z|−rx|−ry| rz|
--------------------+---------------------------+---+---+---+

Table 3.21: Selection of cube map images based on major axis direction of texture coordinates
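For example, take a reflection vector r = (−2.0, 0.5, 1.0). The x component has the largest magnitude and is negative, so TEXTURE_CUBE_MAP_NEGATIVE_X is selected with sc = rz = 1.0, tc = −ry = −0.5, and ma = rx = −2.0, which gives s = 0.5 * (1.0 / 2.0 + 1) = 0.75 and t = 0.5 * (−0.5 / 2.0 + 1) = 0.375.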

Using that, you can write functions that apply the same logic using 6 two-dimensional textures instead of one cubemap:

"use strict";

/* global document, twgl, requestAnimationFrame */

const vs = `
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_projection;
uniform vec3 u_camera;

attribute vec4 position;
attribute vec3 normal;

varying vec3 v_normal;
varying vec3 v_eyeToSurface;

void main() {
  vec4 world = u_model * position;
  gl_Position = u_projection * u_view * world;
  v_eyeToSurface = world.xyz - u_camera;
  v_normal = (u_model * vec4(normal, 0)).xyz;
}
`;
const fs = `
precision mediump float;

varying vec3 v_eyeToSurface;
varying vec3 v_normal;

uniform sampler2D u_textures[6];

void cubemap(vec3 r, out float texId, out vec2 st) {
   vec3 uvw;
   vec3 absr = abs(r);
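   // Pick the face by the major axis of r, per Table 3.21 above;
   // uvw.xy holds (sc, tc) and uvw.z holds |ma|.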
   if (absr.x > absr.y && absr.x > absr.z) {
     // x major
     float negx = step(r.x, 0.0);
     uvw = vec3(r.zy, absr.x) * vec3(mix(-1.0, 1.0, negx), -1, 1);
     texId = negx;
   } else if (absr.y > absr.z) {
     // y major
     float negy = step(r.y, 0.0);
     uvw = vec3(r.xz, absr.y) * vec3(1.0, mix(1.0, -1.0, negy), 1.0);
     texId = 2.0 + negy;
   } else {
     // z major
     float negz = step(r.z, 0.0);
     uvw = vec3(r.xy, absr.z) * vec3(mix(1.0, -1.0, negz), -1, 1);
     texId = 4.0 + negz;
   }
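   // s,t = 0.5 * (sc,tc / |ma| + 1), the spec formulas shown above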
   st = vec2(uvw.xy / uvw.z + 1.) * .5;
}

vec4 texCubemap(vec3 uvw) {
  float texId;
  vec2 st;
  cubemap(uvw, texId, st);
  vec4 color = vec4(0);
  for (int i = 0; i < 6; ++i) {
    vec4 side = texture2D(u_textures[i], st);
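    // select is 1.0 only when i matches texId; GLSL ES 1.00 only
    // allows constant-index-expressions (like this loop index) into
    // sampler arrays, so sample all 6 faces and keep one with mix().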
    float select = step(float(i) - 0.5, texId) * 
                   step(texId, float(i) + .5);
    color = mix(color, side, select);
  }
  return color;
}

void main() {
  vec3 normal = normalize(v_normal);
  vec3 eyeToSurface = normalize(v_eyeToSurface);
  gl_FragColor = texCubemap(reflect(eyeToSurface, normal));
}
`;

const m4 = twgl.m4;
const gl = document.getElementById("c").getContext("webgl");
// compile shaders, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);

// create buffers
const models = [
  twgl.primitives.createSphereBufferInfo(gl, 1, 12, 8),
  twgl.primitives.createCubeBufferInfo(gl, 1.5),
  twgl.primitives.createTorusBufferInfo(gl, .7, .5, 12, 8),
];

const textures = twgl.createTextures(gl, {
  posx: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/posx.jpg', },
  negx: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/negx.jpg', },
  posy: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/posy.jpg', },
  negy: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/negy.jpg', },
  posz: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/posz.jpg', },
  negz: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/negz.jpg', },
});

const uniforms = {
  u_textures: [
    textures.posx,
    textures.negx,
    textures.posy,
    textures.negy,
    textures.posz,
    textures.negz,
  ],
};

function render(time) {
  time *= 0.001;
  twgl.resizeCanvasToDisplaySize(gl.canvas);
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
  
  gl.enable(gl.DEPTH_TEST);
  gl.enable(gl.CULL_FACE);
  gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

  const fov = 30 * Math.PI / 180;
  const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  const zNear = 0.5;
  const zFar = 20;
  const projection = m4.perspective(fov, aspect, zNear, zFar);
  
  const eye = [
    Math.sin(time) * 7, 
    Math.sin(time * .5) * 3, 
    Math.cos(time) * 7,
  ];
  const target = [0, 0, 0];
  const up = [0, 1, 0];

  const camera = m4.lookAt(eye, target, up);
  const view = m4.inverse(camera);

  uniforms.u_camera = eye;
  uniforms.u_projection = projection;
  uniforms.u_view = view;
  
  gl.useProgram(programInfo.program);
  
  models.forEach((bufferInfo, ndx) => {
    let u = ndx / (models.length - 1) * 2 - 1;
    let model = m4.translation([u * (models.length - 1), 0, 0]);
    model = m4.rotateY(model, time * (ndx + 1) * 0.7);
    uniforms.u_model = m4.rotateX(model, time * (ndx + 1) * 0.2);

    twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
    twgl.setUniforms(programInfo, uniforms);
    gl.drawElements(gl.TRIANGLES, bufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
  });

  requestAnimationFrame(render);
}
requestAnimationFrame(render);
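/* CSS used by the runnable snippet: make the canvas fill the page */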
body {
  margin: 0;
}
canvas {
  display: block;
  width: 100vw;
  height: 100vh;
}
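<!-- HTML used by the snippet -->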
<canvas id="c"></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>

But you'll find there are plenty of issues. One is what to do at the edges: the GPU can filter across the edges of a cubemap, but with 6 separate 2D textures we'd have to add something to handle that ourselves. Also, we can't use random access with samplers, nor can we access samplers conditionally, which is why the shader above samples all 6 textures for every fragment; doing it with 6 2D textures instead of one cubemap is therefore going to be a lot slower.
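For instance, a partial workaround for the edge seams is to keep the per-face coordinates at least half a texel away from the border, so that LINEAR filtering never reads the outermost row or column. A minimal sketch, assuming a hypothetical u_faceSize uniform holding the face resolution in texels (not part of the code above):

uniform float u_faceSize; // assumed uniform: resolution of one face in texels

// Clamp st half a texel inward so bilinear filtering on this face
// never touches the border; this hides the seam but cannot blend
// across neighboring faces the way true cubemap filtering can.
vec2 clampToFace(vec2 st) {
  float halfTexel = 0.5 / u_faceSize;
  return clamp(st, vec2(halfTexel), vec2(1.0 - halfTexel));
}

Calling st = clampToFace(st) at the end of cubemap() would apply it.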
