我有一个 GLSL 程序,它在片段着色器中使用 DDA 算法渲染体素。我尝试修改代码,以便可以调整每条射线的步长。目标是缩小体素的分辨率并增加渲染距离。目前,当相机不移动时它可以正常工作。但是,当我移动相机时,它似乎不会向前移动,直到它超过世界坐标中的步长。例如,当步长设置为2时,相机仅每2个坐标移动一次。当步长设置为 10 时,相机仅每 10 个坐标移动一次。
这是我的 GLSL 代码:
#version 330 core
out vec4 FragColor;
// ---- uniforms supplied by the host application ----
uniform vec3 u_CamPos;        // camera position in world space
uniform vec3 u_CamRot;        // camera rotation angles (unused here; u_RotMat is used instead)
uniform float u_FOV;          // field-of-view scale applied to the camera basis vectors
uniform float u_Time;         // elapsed time (unused in this shader)
uniform mat3 u_RotMat;        // camera rotation matrix
uniform sampler3D u_tex;      // 3D voxel volume; alpha (.w) > 0 marks a solid voxel
uniform vec2 u_chunkScale;    // world-to-texture scale (x is reused for both X and Z, y for Y)
uniform int u_RenderDistance; // maximum number of DDA iterations before giving up
uniform int u_StepSize;       // voxel step multiplier applied to each DDA advance
uniform float u_RoundFag;     // per-step depth increment used for the greyscale shading
#define SCREEN_RES 1000.0
// ---- per-fragment working state (globals written in main) ----
vec3 Color = vec3(0, 0, 0);
vec2 iResolution = vec2(SCREEN_RES, SCREEN_RES);
vec2 fragCoord = vec2(0, 0);
vec2 screenPos = vec2(0, 0);
vec3 cameraDir = vec3(0, 0, 0);
vec3 cameraPlaneU = vec3(0, 0, 0);
vec3 cameraPlaneV = vec3(0, 0, 0);
vec3 rayDir = vec3(0, 0, 0);
vec3 rayPos = vec3(0, 0, 0);
// DEBUG: accumulated ray depth, used as the output grey level on a hit
float depth = 0.0;
vec3 RoundDownVecTo(vec3 v, float r)
{
    // Snap each component of v down to the nearest multiple of r.
    vec3 cells = floor(v / r);
    return cells * r;
}
// Sample the voxel volume at normalized texture coordinate `pos`.
// NOTE(review): the `r` parameter is accepted but never used, and this
// function is never called from main() (which calls texture() directly) —
// confirm whether per-voxel rounding was meant to happen here.
vec4 GetVoxel(vec3 pos, float r)
{
vec4 col = texture(u_tex, pos);
return col;
}
void main()
{
// Reconstruct the screen position in [-1, 1] from the fragment coordinate.
fragCoord = vec2(gl_FragCoord.x, gl_FragCoord.y);
screenPos = (fragCoord.xy / iResolution.xy) * 2.0 - 1.0;
// Build the camera basis from the rotation matrix; u_FOV scales forward/right.
cameraDir = normalize(u_RotMat * vec3(0.0, 0.0, u_FOV));
cameraPlaneU = normalize(u_RotMat * vec3(u_FOV, 0.0, 0.0));
cameraPlaneV = cross(cameraDir, cameraPlaneU) * iResolution.y / iResolution.x;
rayDir = cameraDir + screenPos.x * cameraPlaneU + screenPos.y * cameraPlaneV;
rayPos = u_CamPos;
// Standard DDA setup for UNIT-sized cells: starting cell, per-axis
// boundary-crossing distances, and step signs.
ivec3 mapPos = ivec3(floor(rayPos + 0.));
vec3 deltaDist = abs(vec3(length(rayDir)) / (rayDir));
ivec3 rayStep = ivec3(sign(rayDir));
vec3 sideDist = (sign(rayDir) * (vec3(mapPos) - rayPos) + (sign(rayDir) * .5) + .5) * deltaDist;
bvec3 mask;
bool march = true;
int i = 0;
while (march)
{
// Pick the axis whose cell boundary is crossed first.
mask = lessThanEqual(sideDist.xyz, min(sideDist.yzx, sideDist.zxy));
sideDist += (vec3(mask) * deltaDist);
// NOTE(review): `step` shadows the GLSL built-in step() function.
vec3 step = vec3(rayStep);
// NOTE(review): multiplying the advance by u_StepSize while sideDist /
// deltaDist above still assume unit-sized cells breaks the DDA invariant
// (advance exactly one cell per boundary crossing) — mapPos jumps
// u_StepSize cells at a time, which is why the camera appears to snap to
// multiples of the step size. Scale the camera/world into the grid
// instead of scaling the per-step advance.
mapPos += ivec3(mask) * ivec3(step) * u_StepSize;
vec3 samplePos = vec3(mapPos);
// Quantize to the coarse grid, then normalize into 3D-texture space.
samplePos = RoundDownVecTo(samplePos, u_StepSize);
samplePos = vec3(samplePos.x / u_chunkScale.x, samplePos.y / u_chunkScale.y, samplePos.z / u_chunkScale.x);
vec4 _col = texture(u_tex, samplePos);
if (_col.w > 0.0)
{
// Hit a solid voxel: shade with the accumulated depth (greyscale).
Color = vec3(depth, depth, depth);
march = false;
}
// Hit nothing within the render distance: output the sky colour.
if (i >= u_RenderDistance)
{
Color = vec3(0, 1.3, 0.8);
march = false;
}
depth += u_RoundFag;
i++;
}
FragColor = vec4(vec3(Color), 1.0);
}
我尝试在 Unity 中将其可视化,但帮助不大。我也尝试对每个采样点进行向下取整,但这只会让体素看起来变大了,射线的实际步长并没有增加。我知道通过 uniform 传递所有这些数据效率相当低,我计划在未来做出改变。
我也尝试在 shadertoy 网站上调试它,但没有帮助。这是我写的代码。
// Shadertoy debugging version of the voxel DDA.
const bool USE_BRANCHLESS_DDA = true; // unused in this snippet
const int MAX_RAY_STEPS = 64;         // DDA iteration budget
float stepSize = 2.0;                 // intended world-units-per-step multiplier
const int depthMode = 1;              // 1 = greyscale depth, otherwise per-axis face colours
float sdSphere(vec3 p, float d)
{
    // Signed distance from point p to a sphere of radius d at the origin.
    float dist = length(p);
    return dist - d;
}
float sdBox( vec3 p, vec3 b )
{
    // Signed distance from p to an axis-aligned box of half-extents b.
    vec3 q = abs(p) - b;
    float outside = length(max(q, 0.0));
    float inside = min(max(q.x, max(q.y, q.z)), 0.0);
    return inside + outside;
}
vec3 RoundDownVecTo(vec3 v, float r)
{
    // Floor v onto a grid with spacing r (component-wise).
    return r * floor(v / r);
}
// Solid test for integer cell c: true when the (rounded) cell centre lies
// inside a sphere of radius 2.5 at the origin.
bool getVoxel(ivec3 c)
{
vec3 p = vec3(c) + vec3(0.5);
// NOTE(review): rounding the sample point only coarsens the sampled
// geometry; it does not make the ray advance in larger steps.
p = RoundDownVecTo(p, 2.0);
float d = sdSphere(p, 2.5);
return d < 0.0;
}
vec2 rotate2d(vec2 v, float a) {
    // Rotate v counter-clockwise by angle a (radians).
    float c = cos(a);
    float s = sin(a);
    return vec2(c * v.x - s * v.y, s * v.x + c * v.y);
}
// Shadertoy entry point: DDA voxel raymarch against an SDF-defined grid.
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
// Screen position in [-1, 1]; fixed camera basis, aspect-corrected.
vec2 screenPos = (fragCoord.xy / iResolution.xy) * 2.0 - 1.0;
vec3 cameraDir = vec3(0.0, 0.0, 0.8);
vec3 cameraPlaneU = vec3(1.0, 0.0, 0.0);
vec3 cameraPlaneV = vec3(0.0, 1.0, 0.0) * iResolution.y / iResolution.x;
vec3 rayDir = cameraDir + screenPos.x * cameraPlaneU + screenPos.y * cameraPlaneV;
// Orbiting, vertically bobbing camera.
vec3 rayPos = vec3(0.0, 2.0 * sin(iTime * 2.7), -12.0);
rayPos.xz = rotate2d(rayPos.xz, iTime);
rayDir.xz = rotate2d(rayDir.xz, iTime);
// Unit-cell DDA setup; rayStep is filled in by the branches below.
ivec3 mapPos = ivec3(floor(rayPos));
vec3 deltaDist = abs(vec3(length(rayDir)) / rayDir);
ivec3 rayStep = ivec3(0,0,0);
vec3 sideDist = (sign(rayDir) * (vec3(mapPos) - rayPos) + (sign(rayDir) * 0.5) + 0.5) * deltaDist;
float depth = 0.0;
//###########################################################
// NOTE(review): the three per-axis branches below step by +/- stepSize
// cells while sideDist/deltaDist are still computed for unit cells. The
// DDA invariant (advance exactly one cell per boundary crossing) is
// broken, producing the jumping described in the question. Also note
// sideDist is recomputed here WITHOUT the +0.5 recentering used above.
if(rayDir.x < 0.0)
{
rayStep.x = -int(stepSize);
sideDist.x = (rayPos.x - float(mapPos.x)) * deltaDist.x;
}
else
{
rayStep.x = int(stepSize);
sideDist.x = (float(mapPos.x) + 1.0 - rayPos.x) * deltaDist.x;
}
//###########################################################
if (rayDir.y < 0.0)
{
rayStep.y = -int(stepSize);
sideDist.y = (rayPos.y - float(mapPos.y)) * deltaDist.y;
}
else
{
rayStep.y = int(stepSize);
sideDist.y = (float(mapPos.y) + 1.0 - rayPos.y) * deltaDist.y;
}
//###########################################################
if (rayDir.z < 0.0)
{
rayStep.z = -int(stepSize);
sideDist.z = (rayPos.z - float(mapPos.z)) * deltaDist.z;
}
else
{
rayStep.z = int(stepSize);
sideDist.z = (float(mapPos.z) + 1.0 - rayPos.z) * deltaDist.z;
}
//###########################################################
bvec3 mask;
for (int i = 0; i < MAX_RAY_STEPS; i++)
{
// NOTE(review): on a hit this `continue` spins through the remaining
// iterations without advancing — it behaves like `break` but wastes the
// rest of the loop budget.
if (getVoxel(mapPos)) continue;
depth += 0.010;
// Advance along the axis with the nearest cell boundary.
mask = lessThanEqual(sideDist.xyz, min(sideDist.yzx, sideDist.zxy));
sideDist += vec3(mask) * deltaDist;
mapPos += ivec3(vec3(mask)) * rayStep;
}
vec3 color;
// Colour by the axis of the last crossed face, or by accumulated depth.
if (mask.x)
{
if(depthMode == 1)
color = vec3(depth);
else
color = vec3(1.0,0,0);
}
if (mask.y)
{
if(depthMode == 1)
color = vec3(depth);
else
color = vec3(0,1,0);
}
if (mask.z)
{
if(depthMode == 1)
color = vec3(depth);
else
color = vec3(0,0,1);
}
fragColor.rgb = color;
}
有很多方法可以更改 DDA 的体素网格大小。然而,通常最好保持比例不变,因为在处理单位体素大小时,该算法是最有效和最准确的。相反,您可以简单地更改表观体素大小。实现此目的的一种方法是将相机的位置乘以某个值,从而使物体看起来更小或具有更精细的体素分辨率。因为就着色器而言,这会将所有体素保持在整数位置,因此您可能需要向代码的其他部分添加一些缩放逻辑,例如用于生成世界。
下面是我对您的 Shadertoy 代码的更改版本。我缩放了
getVoxel
函数参数,以便您可以看到分辨率的变化。我还删除了更改 rayStep
向量的代码,因为它导致了您观察到的跳跃。
// Answer's modified Shadertoy version.
const bool USE_BRANCHLESS_DDA = true; // unused in this snippet
const int MAX_RAY_STEPS = 64;         // DDA iteration budget
float stepSize = 3.0;                 // apparent-voxel-size scale factor
const int depthMode = 1;              // 1 = greyscale depth, otherwise per-axis face colours
float sdSphere(vec3 p, float d)
{
    // Distance to the surface of an origin-centred sphere of radius d;
    // negative inside, positive outside.
    return length(p) - d;
}
float sdBox( vec3 p, vec3 b )
{
    // Signed distance to an axis-aligned box with half-extents b.
    vec3 q = abs(p) - b;
    float exterior = length(max(q, 0.0));
    float interior = min(max(q.x, max(q.y, q.z)), 0.0);
    return interior + exterior;
}
// Snap each component of v down to the nearest multiple of r.
// No longer called — the rounding call in getVoxel below is commented
// out — but kept for reference.
vec3 RoundDownVecTo(vec3 v, float r)
{
return floor(v / r) * r;
}
// Solid test for integer cell c. The cell centre is divided by stepSize
// so the same sphere covers stepSize-times more cells — the voxels
// appear stepSize-times smaller relative to the geometry, while the DDA
// itself still walks unit cells.
bool getVoxel(ivec3 c)
{
vec3 p = vec3(c) + vec3(0.5);
//p = RoundDownVecTo(p, 2.0);
p /= stepSize;
float d = sdSphere(p, 2.5);
return d < 0.0;
}
vec2 rotate2d(vec2 v, float a) {
    // Standard 2D rotation of v by angle a (radians, counter-clockwise).
    float s = sin(a);
    float c = cos(a);
    vec2 rotated = vec2(v.x * c - v.y * s, v.x * s + v.y * c);
    return rotated;
}
// Modified entry point: scale the CAMERA into the voxel grid
// (rayPos *= stepSize) instead of scaling the per-step advance, so the
// DDA still moves exactly one unit cell per iteration.
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
// Screen position in [-1, 1]; fixed camera basis, aspect-corrected.
vec2 screenPos = (fragCoord.xy / iResolution.xy) * 2.0 - 1.0;
vec3 cameraDir = vec3(0.0, 0.0, 0.8);
vec3 cameraPlaneU = vec3(1.0, 0.0, 0.0);
vec3 cameraPlaneV = vec3(0.0, 1.0, 0.0) * iResolution.y / iResolution.x;
vec3 rayDir = cameraDir + screenPos.x * cameraPlaneU + screenPos.y * cameraPlaneV;
// Orbiting, vertically bobbing camera.
vec3 rayPos = vec3(0.0, 2.0 * sin(iTime * 2.7), -12.0);
rayPos.xz = rotate2d(rayPos.xz, iTime);
rayDir.xz = rotate2d(rayDir.xz, iTime);
// Key change: scaling the camera position makes the world appear
// stepSize-times larger (voxels look smaller) while all voxels stay at
// integer positions as far as the traversal is concerned.
rayPos *= stepSize;
ivec3 mapPos = ivec3(floor(rayPos));
vec3 deltaDist = abs(vec3(length(rayDir)) / rayDir);
// Unit steps restored: exactly one cell per boundary crossing.
ivec3 rayStep = ivec3(sign(rayDir));
vec3 sideDist = (sign(rayDir) * (vec3(mapPos) - rayPos) + (sign(rayDir) * 0.5) + 0.5) * deltaDist;
float depth = 0.0;
bvec3 mask;
for (int i = 0; i < MAX_RAY_STEPS; i++)
{
// On a hit, idle out the remaining iterations (acts like break).
if (getVoxel(mapPos)) continue;
depth += 0.010;
// Advance along the axis with the nearest cell boundary.
mask = lessThanEqual(sideDist.xyz, min(sideDist.yzx, sideDist.zxy));
sideDist += vec3(mask) * deltaDist;
mapPos += ivec3(vec3(mask)) * rayStep;
}
vec3 color;
// Colour by the axis of the last crossed face, or by accumulated depth.
if (mask.x)
{
if(depthMode == 1)
color = vec3(depth);
else
color = vec3(1.0,0,0);
}
if (mask.y)
{
if(depthMode == 1)
color = vec3(depth);
else
color = vec3(0,1,0);
}
if (mask.z)
{
if(depthMode == 1)
color = vec3(depth);
else
color = vec3(0,0,1);
}
fragColor.rgb = color;
}
初始着色器代码的问题在于您正在修改
rayStep
向量。这个变量的名称有点误导——它并不控制射线每一步在世界中前进的距离;相反,它只记录沿各坐标轴步进的方向(符号),也就是世界数据被采样的方式。当您将 rayStep
乘以 stepSize
时,它会有效地跳过每个轴上的每隔一个体素(在几乎所有情况下,地图位置每一步只应改变 1)。这与您在 getVoxel
函数内舍入位置的方式相结合,导致了您看到的跳跃。
有关另一个类似的示例(在 wgsl 中),请参阅 此着色器。