C# / OpenTK。
我一直很难找到一种在场景中组合不同几何类型的好方法。出于效率原因,我一直在尝试将相同的着色器和单个VAO / VBO用于混合图元类型(三角形,线条和可能的线条)。不知道这是否可能/期望。
我遇到的主要问题是三角形是实例化的,并且它们输入到顶点着色器中的输入必须是顶点和法线。但是,这些线不是实例化的,不需要照明,而是不同的颜色,因此,它们输入到顶点着色器中所需的输入必须是顶点和颜色。因此,在一种情况下,我想要着色器中的法线,在另一种情况下,我想要颜色。
一种解决方案是在着色器中将此条件设置为有条件。我已经在顶点着色器的顶部找到了它:
layout(location = 0) in vec3 aPos;
layout(location = 1) in vec3 aNormal;
layout(location = 2) in vec3 vertexColor;
如果我尝试将此处设为有条件的(即位置 2 是法线还是颜色,取决于图元类型),则会收到语法错误。由于这些行是 main() 之外的声明,我并不特别惊讶——我认为那里不允许使用 if 语句。
另一个解决方案是使用单独的着色器,但如果可能的话,我希望避免重复。
目前,我已将多边形和直线分成两个单独的缓冲区,因为这似乎合乎逻辑。我可以使用相同的着色器(不添加用于填充的虚拟值来填充它们,这可以使数据结构一致但效率不是很高)吗?理想情况下,我有办法告诉我在一种情况下传递法线,而在另一种情况下传递顶点。
还有一些我不理解的行为。根据我在该主题上能找到的所有资料,我认为 VertexAttribPointer 的 index 参数必须与着色器中的 location 相对应。但实际行为并非如此。我以索引 2 传入了顶点数据(在下面 InitVertexBuffers 的末尾附近可见),该索引在着色器中对应的是颜色——但这些几何体仍被正确渲染了。
所以我的问题是:
1)着色器中的 layout 定义可以写成有条件的吗?
2)不同图元类型的数据应该保存在单独的缓冲区中吗?
3)VertexAttribPointer 的第一个参数(index)难道不是告诉 OpenGL 如何处理每组数据的吗?如果不是,那由什么决定?
/// <summary>
/// Builds the two vertex buffers. VAO1/VBO1 holds the instanced triangle
/// mesh (interleaved position + normal, 6 floats per vertex). VAO2/VBO2
/// holds both line sets (interleaved position + colour) packed back to
/// back in one buffer. Also enables depth testing.
/// </summary>
private void InitVertexBuffers()
{
    int bytesPerFloat = sizeof(float);
    int stride = 6 * bytesPerFloat; // 3 floats position + 3 floats normal/colour per vertex

    // ----- Buffer 1: instanced triangle geometry -----
    VBO1 = GL.GenBuffer();
    VAO1 = GL.GenVertexArray();
    // Bind the VAO first so the attribute pointers below are recorded into it.
    GL.BindVertexArray(VAO1);
    GL.BindBuffer(BufferTarget.ArrayBuffer, VBO1);

    var segLengths = new List<int> { vertices.Length * bytesPerFloat };
    // Byte offsets of each data segment within the buffer.
    var segOffsets = new List<int> { 0 };

    // Reserve the storage first, then copy the vertex data into it.
    GL.BufferData(BufferTarget.ArrayBuffer, vertices.Length * bytesPerFloat, (IntPtr)0, BufferUsageHint.StaticDraw);
    GL.BufferSubData(BufferTarget.ArrayBuffer, (IntPtr)0, segLengths[0], vertices);

    // Attribute 0: sensor offset vertices; attribute 1: normals.
    GL.VertexAttribPointer(0, 3, VertexAttribPointerType.Float, false, stride, 0);
    GL.VertexAttribPointer(1, 3, VertexAttribPointerType.Float, false, stride, 3 * bytesPerFloat);
    GL.EnableVertexAttribArray(0);
    GL.EnableVertexAttribArray(1);

    // ----- Buffer 2: the two line sets, stored consecutively -----
    VBO2 = GL.GenBuffer();
    VAO2 = GL.GenVertexArray();
    GL.BindVertexArray(VAO2);
    GL.BindBuffer(BufferTarget.ArrayBuffer, VBO2);

    segLengths = new List<int>
    {
        line_vertices.Length * bytesPerFloat,
        axis_widget_vertices.Length * bytesPerFloat,
    };
    segOffsets = new List<int>
    {
        0,
        segLengths[0],
    };

    GL.BufferData(BufferTarget.ArrayBuffer, (line_vertices.Length + axis_widget_vertices.Length) * bytesPerFloat, (IntPtr)0, BufferUsageHint.StaticDraw);
    GL.BufferSubData(BufferTarget.ArrayBuffer, (IntPtr)segOffsets[0], segLengths[0], line_vertices);
    GL.BufferSubData(BufferTarget.ArrayBuffer, (IntPtr)segOffsets[1], segLengths[1], axis_widget_vertices);

    // Attributes 0/1: individual lines; attributes 2/3: axis widget lines.
    // NOTE(review): attributes 2 and 3 appear redundant — the first-vertex
    // argument of DrawArrays already selects the second segment through
    // attributes 0/1 (the data is contiguous and shares the same stride).
    GL.VertexAttribPointer(0, 3, VertexAttribPointerType.Float, false, stride, segOffsets[0]);
    GL.VertexAttribPointer(1, 3, VertexAttribPointerType.Float, false, stride, segOffsets[0] + 3 * bytesPerFloat);
    GL.VertexAttribPointer(2, 3, VertexAttribPointerType.Float, false, stride, segOffsets[1]);
    GL.VertexAttribPointer(3, 3, VertexAttribPointerType.Float, false, stride, segOffsets[1] + 3 * bytesPerFloat);
    GL.EnableVertexAttribArray(0);
    GL.EnableVertexAttribArray(1);
    GL.EnableVertexAttribArray(2);
    GL.EnableVertexAttribArray(3);

    GL.Enable(EnableCap.DepthTest); // enable the z-buffer
}
// Renders one frame with a single shader program (Handle):
// pass 1 draws the instanced, lit triangles from VAO1; pass 2 draws the two
// unlit line batches from VAO2; the back buffer is then presented.
private void Draw()
{
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
// Pass 1: instanced triangles. "instanced" switches the vertex shader to
// per-instance offsets/colours. Vertices are 6 floats each, hence /6.
GL.BindVertexArray(VAO1);
SetBool(Handle, "instanced", 1);
GL.DrawArraysInstanced(PrimitiveType.Triangles, 0, vertices.Length / 6, allNodes.Count);
SetBool(Handle, "instanced", 0);
// Pass 2: lines, lighting off. Both line sets sit back to back in VAO2's
// buffer, so the second draw starts at the vertex where the first set ends.
GL.BindVertexArray(VAO2);
SetBool(Handle, "use_lighting", 0);
GL.DrawArrays(PrimitiveType.Lines, 0, line_vertices.Length / 6);
GL.DrawArrays(PrimitiveType.Lines, line_vertices.Length / 6, axis_widget_vertices.Length / 6);
SetBool(Handle, "use_lighting", 1);
glControl1.SwapBuffers();
}
// GLSL vertex shader (single-program version) for both instanced triangles
// and lines. Per-instance offsets/colours come from uniform arrays indexed
// by gl_InstanceID (0 for non-instanced draws).
// NOTE(review): this initializer reads allNodes.Count — if this is an
// instance field initializer referencing another instance member it will
// not compile (CS0236), and a Count of 0 would produce a zero-length GLSL
// array; presumably it is assigned where allNodes is available — TODO confirm.
string vertexShaderSource =
"# version 330 core\n" +
"uniform mat4 model;" +
"uniform mat4 view;" +
"uniform mat4 projection;" +
"uniform bool instanced;" +
$"uniform vec3 offsets[{ allNodes.Count }];" +
$"uniform vec3 colors[{ allNodes.Count }];" +
"layout(location = 0) in vec3 aPos;" +
"layout(location = 1) in vec3 aNormal;" +
"layout(location = 2) in vec3 vertexColor;" +
"out vec3 color_out;" +
"out vec3 FragPos;" +
"out vec3 Normal;" +
"void main()" +
"{" +
"vec3 offset = offsets[gl_InstanceID];" +
"gl_Position = projection * view * model * vec4(aPos + offset, 1.0);" +
"FragPos = vec3(model * vec4(aPos + offset, 1.0));" +
// Fix: w must be 0.0 so the model translation is NOT applied to the normal.
// (Strictly, non-uniform scaling also requires the inverse-transpose of model.)
"Normal = vec3(model * vec4(aNormal, 0.0));" +
"if (instanced) {" +
"color_out = colors[gl_InstanceID];" +
"} else {" +
"color_out = vec3(1.0,1.0,1.0);" +   // non-instanced geometry defaults to white
"}" +
"}";
我将在这里基本上回答我自己的问题。感谢 Nicol Bolas 的评论,它让我意识到使用单独的着色器才是合适的做法。我已经实现了这个想法,这似乎是最好的方案。
我现在为多边形和线条数据使用了两套独立的着色器。两对着色器都在启动时初始化,通过一次 GL.UseProgram() 调用在它们之间切换。这让我得以大大简化渲染线条所用的着色器。
我认为(?)我之前对 VertexAttribPointer() 索引号的困惑,源于我对它的第三、四次调用实际上是多余的:由于数据在缓冲区中是连续的,而 VertexAttribPointer() 只指向这段数据的开头,所以前两次调用就足以分别定义顶点和颜色数据的位置;随后 GL.DrawArrays() 的调用会准确决定使用哪一段数据。
一些修订的代码:
// Revised frame renderer using two shader programs: Handle for the
// instanced, lit polygons (VAO1) and Handle2 for the unlit lines (VAO2).
// Switching programs replaces the "use_lighting" uniform toggling of the
// single-shader version.
public void Render() {
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
GL.UseProgram(Handle); // shaders for polygons
GL.BindVertexArray(VAO1);
SetBool(Handle, "instanced", 1);
// Vertices are 6 floats each (position + normal), hence the /6.
GL.DrawArraysInstanced(PrimitiveType.Triangles, 0, vertices.Length / 6, allNodes.Count);
SetBool(Handle, "instanced", 0);
GL.BindVertexArray(VAO2);
GL.UseProgram(Handle2); // shaders for lines
// Both line sets are packed back to back in VAO2's buffer; the second draw
// starts at the vertex index where the first set ends.
GL.DrawArrays(PrimitiveType.Lines, 0, line_vertices.Length / 6);
GL.DrawArrays(PrimitiveType.Lines, line_vertices.Length / 6, axis_widget_vertices.Length / 6);
glControl1.SwapBuffers();
}
// GLSL vertex shader for the polygon (instanced triangle) program.
// Per-instance offsets/colours come from uniform arrays indexed by
// gl_InstanceID (0 for non-instanced draws).
// NOTE(review): this initializer reads allNodes.Count — if this is an
// instance field initializer referencing another instance member it will
// not compile (CS0236); presumably it is assigned where allNodes is
// available — TODO confirm.
string vertexShaderSource = // for polygons
"# version 330 core\n" +
"uniform mat4 model;" +
"uniform mat4 view;" +
"uniform mat4 projection;" +
"uniform bool instanced;" +
$"uniform vec3 offsets[{ allNodes.Count }];" +
$"uniform vec3 colors[{ allNodes.Count }];" +
"layout(location = 0) in vec3 aPos;" +
"layout(location = 1) in vec3 aNormal;" +
"out vec3 color_out;" +
"out vec3 FragPos;" +
"out vec3 Normal;" +
"void main()" +
"{" +
"vec3 offset = offsets[gl_InstanceID];" +
"gl_Position = projection * view * model * vec4(aPos + offset, 1.0);" +
"FragPos = vec3(model * vec4(aPos + offset, 1.0));" +
// Fix: w must be 0.0 so the model translation is NOT applied to the normal.
// (Strictly, non-uniform scaling also requires the inverse-transpose of model.)
"Normal = vec3(model * vec4(aNormal, 0.0));" +
"if (instanced) {" +
"color_out = colors[gl_InstanceID];" +
"} else {" +
"color_out = vec3(1.0,1.0,1.0);" +   // non-instanced geometry defaults to white
"}" +
"}";
// GLSL vertex shader for the line program: no instancing and no lighting.
// Attribute layout: location 0 = position, location 1 = per-vertex colour.
// The colour is passed straight through to the fragment shader.
// NOTE(review): FragPos is emitted but fragmentShader2 never reads it —
// harmless, though it could be removed.
string vertexShader2 = // for lines
"# version 330 core\n" +
"uniform mat4 model;" +
"uniform mat4 view;" +
"uniform mat4 projection;" +
"layout(location = 0) in vec3 aPos;" +
"layout(location = 1) in vec3 vertexColor;" +
"out vec3 color_out;" +
"out vec3 FragPos;" +
"void main()" +
"{" +
"gl_Position = projection * view * model * vec4(aPos, 1.0);" +
"FragPos = vec3(model * vec4(aPos, 1.0));" +
"color_out = vertexColor;" +
"}";
// GLSL fragment shader for the polygon program: ambient + diffuse lighting
// with a uniform switch (use_lighting) to bypass lighting entirely.
// NOTE(review): objectColor is declared but unused — the per-vertex/instance
// color_out is used instead; consider removing the uniform.
string fragmentShaderSource = // for polygons
"#version 330 core\n" +
"in vec3 FragPos;" +
"in vec3 Normal;" +
"in vec3 color_out;" +
"out vec4 FragColor;" +
"uniform vec3 objectColor;" +
"uniform vec3 lightColor;" +
"uniform vec3 lightPos;" +
"uniform bool use_lighting;" +
"void main()" +
"{" +
"if (use_lighting) {" +
"float ambientStrength = 0.5;" +
"vec3 ambient = ambientStrength * lightColor;" +
// Fix: interpolated normals are generally not unit length, so they must be
// renormalized before use in the diffuse dot product.
"vec3 norm = normalize(Normal);" +
"vec3 lightDir = normalize(lightPos - FragPos);" +
"float diff = max(dot(norm, lightDir), 0.0);" +
"vec3 diffuse = diff * lightColor;" +
"vec3 result = (ambient + diffuse) * color_out;" + // color_out = object colour from vertex shader
"FragColor = vec4(result, 1.0);" +
"} else {" +
"FragColor = vec4(color_out, 1.0);" +
"}" +
"}";
// GLSL fragment shader for the line program: no lighting, simply emits the
// interpolated per-vertex colour with full opacity.
// NOTE(review): FragPos is declared but never read here.
string fragmentShader2 = // for lines
"#version 330 core\n" +
"in vec3 FragPos;" +
"in vec3 color_out;" +
"out vec4 FragColor;" +
"void main()" +
"{" +
"FragColor = vec4(color_out, 1.0);" +
"}";