I am currently writing a graphics engine for OpenGL in C++. Here's the source if you're interested: https://github.com/freddycansic/OpenGL
These are the results when rendering a cube with a solid colour or a texture on my laptop (Ryzen 5500U, integrated Radeon graphics):
https://i.sstatic.net/gKqYJ.jpg
These are the results when rendering the same on my desktop (Ryzen 5 2400G, NVIDIA 1060):
https://i.sstatic.net/oG97j.jpg
As you can see, artefacts begin appearing when rendering with a solid colour on my desktop.
When rendering with more than one texture I run into further issues:
Laptop working as normal: https://i.sstatic.net/lN9a0.jpg
Desktop 2 textures merging together?: https://i.sstatic.net/1oPj6.jpg
The general code for reproducing this is as follows:
Shader code:
Vertex shader:
#version 330 core
layout(location = 0) in vec4 a_Position;
layout(location = 1) in vec4 a_Color;
layout(location = 2) in vec2 a_TexCoord;
layout(location = 3) in float a_TexID;
out vec4 v_Color;
out vec2 v_TexCoord;
out float v_TexID;
uniform mat4 u_ViewProj;
void main() {
gl_Position = u_ViewProj * a_Position;
v_Color = a_Color;
v_TexCoord = a_TexCoord;
v_TexID = a_TexID;
}
Fragment shader:
#version 330 core
out vec4 color;
in vec4 v_Color;
in vec2 v_TexCoord;
in float v_TexID;
uniform sampler2D u_Textures[32];
void main() {
int index = int(v_TexID);
if (index < 0) { // if index < 0 do a color
color = v_Color;
}
else { // else do a texture
color = texture(u_Textures[index], v_TexCoord);
}
}
Create the vertex array and shader, and allocate memory for the vertex buffer and index buffer:
unsigned int program = glCreateProgram();
unsigned int vertexShader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertexShader, 1, &vertexSource, nullptr);
glCompileShader(vertexShader);
// same with fragment
glAttachShader(program, vertexShader);
glAttachShader(program, fragmentShader);
glLinkProgram(program);
glValidateProgram(program);
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
unsigned int vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
unsigned int vbo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, 50000 * sizeof(Vertex), nullptr, GL_DYNAMIC_DRAW);
unsigned int ibo;
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, 75000 * sizeof(GLuint), nullptr, GL_DYNAMIC_DRAW);
Enable vertex attributes for position, colour, texture coordinates and texture ID.
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 40, (const void*)0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 40, (const void*)12);
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 40, (const void*)28);
glEnableVertexAttribArray(3);
glVertexAttribPointer(3, 1, GL_FLOAT, GL_FALSE, 40, (const void*)36);
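For reference, the stride of 40 bytes and the offsets 0, 12, 28 and 36 correspond to a vertex layout along these lines (a simplified sketch, not the exact struct from the repo):
struct Vertex {
    float position[3]; // offset 0,  12 bytes
    float color[4];    // offset 12, 16 bytes
    float texCoord[2]; // offset 28,  8 bytes
    float texID;       // offset 36,  4 bytes
};
static_assert(sizeof(Vertex) == 40, "stride passed to glVertexAttribPointer");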
Now generate vertices and indices for a cube, which end up as follows. A texture ID of -1 denotes that we are not using a texture.
{{0, 0, 0}, {1, 0, 0, 1}, {0, 0}, -1}
// ... omitted for sanity
{{1, 1, 1}, {1, 0, 0, 1}, {1, 1}, -1}
0, 1, 2,
// ...
6, 7, 4
These vertices and indices are then stored in separate vectors on the CPU until the batch concludes, at which point:
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(Vertex) * CPUVertexBuffer.size(), CPUVertexBuffer.data());
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 0, sizeof(GLuint) * CPUIndexBuffer.size(), CPUIndexBuffer.data());
glUseProgram(program);
glBindVertexArray(vao);
glDrawElements(GL_TRIANGLES, (GLsizei) CPUIndexBuffer.size(), GL_UNSIGNED_INT, nullptr);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
If anyone has seen anything like this before or knows of any reason that would be causing this, I would love to hear from you.
int index = int(v_TexID);
if (index < 0) { // if index < 0 do a color
    color = v_Color;
}
else { // else do a texture
    color = texture(u_Textures[index], v_TexCoord);
}
This code is wrong in two ways. The GLSL spec clearly states in section 4.1.7:
Texture-combined sampler types are opaque types, declared and behaving as described above for opaque types. When aggregated into arrays within a shader, they can only be indexed with a dynamically uniform integral expression, otherwise results are undefined.
Within the rules of the GLSL spec, a single invocation group can be as big as the whole render API call, so unless your a_TexID value is the same for all vertices in that render call, the results of this are undefined.
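For contrast, here is a sketch (not code from the question) of an index that is dynamically uniform: a plain uniform has the same value for every invocation of a draw call, so using it to index the sampler array is well-defined. The name u_TexIndex is assumed here, and of course this limits you to one texture per draw call:
#version 330 core
out vec4 color;
in vec2 v_TexCoord;
uniform sampler2D u_Textures[32];
uniform int u_TexIndex; // set once per draw call, e.g. with glUniform1i
void main() {
    // u_TexIndex is dynamically uniform, so this array access is well-defined.
    color = texture(u_Textures[u_TexIndex], v_TexCoord);
}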
Furthermore, from section 8.1:
Some texture functions (non-“Lod” and non-“Grad” versions) may require implicit derivatives. Implicit derivatives are undefined within non-uniform control flow and for non-fragment shader texture fetches.
Since your texture() call uses implicit derivatives, it must not be used in non-uniform control flow.
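Even setting the indexing problem aside, the texture() call itself would have to be moved out of the branch. A sketch of one common way to do that with the shader above, sampling unconditionally and selecting afterwards (this only addresses the implicit-derivative issue, not the non-uniform indexing):
    // Keep the texture fetch in uniform control flow; clamp the index so
    // the -1 "no texture" marker does not read out of bounds.
    vec4 texColor = texture(u_Textures[max(index, 0)], v_TexCoord);
    // Choose between the solid colour and the texel afterwards.
    color = (index < 0) ? v_Color : texColor;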
What you're trying to do here cannot be achieved within the guarantees of OpenGL 4.6.
What you could do is:
- Instead of an array of sampler2Ds, use a single sampler2DArray with an array texture. Selecting the layer from the array doesn't have to be a dynamically uniform expression.
- Use the GL_ARB_bindless_texture extension, which also removes this restriction.
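A sketch of the first option (names assumed, not taken from the linked repo): the fragment shader samples a single sampler2DArray, passing the layer as the third texture coordinate, and keeps the fetch outside any non-uniform branch:
#version 330 core
out vec4 color;
in vec4 v_Color;
in vec2 v_TexCoord;
in float v_TexID;
// One array texture replaces the 32 separate samplers; the layer index
// does not have to be dynamically uniform.
uniform sampler2DArray u_TextureArray;
void main() {
    // Sample unconditionally (uniform control flow) and clamp the layer
    // so the -1 "no texture" marker does not select a negative layer.
    vec4 texColor = texture(u_TextureArray, vec3(v_TexCoord, max(v_TexID, 0.0)));
    // Pick the solid colour or the sampled texel afterwards.
    color = (v_TexID < 0.0) ? v_Color : texColor;
}
On the C++ side the individual images become layers of a GL_TEXTURE_2D_ARRAY, uploaded with glTexImage3D/glTexSubImage3D; the practical constraint is that every layer must share the same dimensions and format.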