Search code examples
c++ · opengl · shader · mesh

Opengl lighting illuminating the wrong surfaces


I am using OpenGL to display simple objects with a light above them. The problem is that the faces of my object are not lit correctly. Here is my result:

result

The light is supposed to be above the object. I load objects from Wavefront OBJ files like this:

        if ( strcmp( lineHeader, "v" ) == 0 ){
            glm::vec3 vertex;
            fscanf(file, "%f %f %f\n", &vertex.x, &vertex.y, &vertex.z );
            vertices.push_back(vertex);
}else if ( strcmp( lineHeader, "vt" ) == 0 ){
            glm::vec2 uv;
            fscanf(file, "%f %f\n", &uv.x, &uv.y );
            uv.y = uv.y; 
            // Invert V coordinate since we will only use DDS texture, which are inverted. Remove if you want to use TGA or BMP loaders.
            temp_uvs.push_back(uv);
        }else if ( strcmp( lineHeader, "vn" ) == 0 ){
            glm::vec3 normal;
            fscanf(file, "%f %f %f\n", &normal.x, &normal.y, &normal.z );
            temp_normals.push_back(normal);
        }else if ( strcmp( lineHeader, "f" ) == 0 ){
            std::string vertex1, vertex2, vertex3;
            unsigned int vertexIndex[3], uvIndex[3], normalIndex[3];
            int matches = fscanf(file, "%d/%d/%d %d/%d/%d %d/%d/%d\n", &vertexIndex[0], &uvIndex[0], &normalIndex[0], &vertexIndex[1], &uvIndex[1], &normalIndex[1], &vertexIndex[2], &uvIndex[2], &normalIndex[2] );
            if (matches != 9){
                printf("File can't be read by our simple parser :-( Try exporting with other options\n");
                    return false;
                }
                indices.push_back(vertexIndex[0]-1);
                indices.push_back(vertexIndex[1]-1);
                indices.push_back(vertexIndex[2]-1);
                uvIndices    .push_back(uvIndex[0]);
                uvIndices    .push_back(uvIndex[1]);
                uvIndices    .push_back(uvIndex[2]);
                normalIndices.push_back(normalIndex[0]);
                normalIndices.push_back(normalIndex[1]);
                normalIndices.push_back(normalIndex[2]);
            }else{
            // Probably a comment, eat up the rest of the line
                char stupidBuffer[1000];
                fgets(stupidBuffer, 1000, file);
            }
        }

        normals.reserve(indices.size());
        uvs.reserve(indices.size());

        for( unsigned int i=0; i<indices.size(); i++ ){

        // Get the indices of its attributes
            unsigned int uvIndex = uvIndices[i];
            unsigned int normalIndex = normalIndices[i];
            normals[indices[i]] = temp_normals[normalIndex-1];
            uvs[indices[i]] = temp_uvs[uvIndex-1];

the vertex shader :

#version 150 core

// Per-vertex attributes (names match the app-side glGetAttribLocation calls)
in vec2 color;    // actually the UV coordinates -- see the VBO setup code
in vec3 position; // model-space position
in vec3 normal;   // model-space normal

// Outputs interpolated for the fragment shader
out vec2 UV;
out vec3 Position_worldspace;
out vec3 Normal_cameraspace;
out vec3 EyeDirection_cameraspace;
out vec3 LightDirection_cameraspace;

uniform mat4 MVP; // Projection * View * Model
uniform mat4 V;   // View matrix
uniform mat4 M;   // Model matrix
uniform vec3 LightPosition_worldspace;

void main() {

    // Position of the vertex, in world space : M * position
    Position_worldspace = (M * vec4(position, 1.0)).xyz;

    // Vector that goes from the vertex to the camera, in camera space.
    // In camera space, the camera is at the origin (0,0,0).
    vec3 vertexPosition_cameraspace = (V * M * vec4(position, 1.0)).xyz;
    EyeDirection_cameraspace = -vertexPosition_cameraspace;

    // Vector that goes from the vertex to the light, in camera space.
    // LightPosition + EyeDirection == LightPosition - vertexPosition.
    vec3 LightPosition_cameraspace = (V * vec4(LightPosition_worldspace, 1.0)).xyz;
    LightDirection_cameraspace = LightPosition_cameraspace + EyeDirection_cameraspace;

    // Normal of the vertex, in camera space.
    // Only correct if the model matrix does not scale the model!
    // Use its inverse transpose otherwise.
    Normal_cameraspace = (V * M * vec4(normal, 0.0)).xyz;

    // UV of the vertex. No special space for this one.
    UV = color;

    gl_Position = MVP * vec4(position, 1.0);
}
// NOTE: the stray ';' that followed this closing brace was removed -- a
// semicolon there is an empty top-level declaration, which some GLSL
// compilers reject.

and my fragment shader is :

#version 150 core

// Interpolated values from the vertex shaders
in vec2 UV;
in vec3 Position_worldspace;
in vec3 Normal_cameraspace;
in vec3 EyeDirection_cameraspace;
in vec3 LightDirection_cameraspace;

// BUG(review): missing ';' after this declaration -- the shader will not
// compile as written (Solution point 2 below identifies this as well).
out vec4 outColor

// Values that stay constant for the whole mesh.
uniform sampler2D myTextureSampler;
uniform vec3 LightPosition_worldspace;

void main(){

    vec3 LightColor = vec3(1,1,1);
    float LightPower = 20.0f;

// Material properties
    // NOTE(review): texture2D() is the pre-1.30 name; in "#version 150 core"
    // the overloaded texture() function is expected -- confirm against the
    // GLSL compiler in use.
    vec3 MaterialDiffuseColor = texture2D( myTextureSampler, UV ).rgb;
    vec3 MaterialAmbientColor = vec3(0.1,0.1,0.1) * MaterialDiffuseColor;
    vec3 MaterialSpecularColor = vec3(0.3,0.3,0.3);

// Distance to the light (used for the 1/d^2 attenuation below)
    float distance = length( LightPosition_worldspace - Position_worldspace );

// Normal of the computed fragment, in camera space
    vec3 n = normalize( Normal_cameraspace );
// Direction of the light (from the fragment to the light)
    vec3 l = normalize( LightDirection_cameraspace );
// Cosine of the angle between the normal and the light direction, 
// clamped above 0
//  - light is at the vertical of the triangle -> 1
//  - light is perpendicular to the triangle -> 0
//  - light is behind the triangle -> 0
    float cosTheta = clamp( dot( n,l ), 0,1 );

// Eye vector (towards the camera)
    vec3 E = normalize(EyeDirection_cameraspace);
// Direction in which the triangle reflects the light
    vec3 R = reflect(-l,n);
// Cosine of the angle between the Eye vector and the Reflect vector,
// clamped to 0
//  - Looking into the reflection -> 1
//  - Looking elsewhere -> < 1
    float cosAlpha = clamp( dot( E,R ), 0,1 );

outColor.rgb = 
// Ambient : simulates indirect lighting
    MaterialAmbientColor +
// Diffuse : "color" of the object
    MaterialDiffuseColor * LightColor * LightPower * cosTheta / (distance*distance) +
// Specular : reflective highlight, like a mirror
    MaterialSpecularColor * LightColor * LightPower * pow(cosAlpha,5) / (distance*distance);

}

here is the cube loaded :

# cube.obj
#

o cube

v  0.0  0.0  0.0
v  0.0  0.0  1.0
v  0.0  1.0  0.0
v  0.0  1.0  1.0
v  1.0  0.0  0.0
v  1.0  0.0  1.0
v  1.0  1.0  0.0
v  1.0  1.0  1.0

vn  0.0  0.0  1.0
vn  0.0  0.0 -1.0
vn  0.0  1.0  0.0
vn  0.0 -1.0  0.0
vn  1.0  0.0  0.0
vn -1.0  0.0  0.0

vt 0.25 0.0
vt 0.5  0.0
vt 0    0.25
vt 0.25 0.25
vt 0.5  0.25
vt 0.75 0.25
vt 0.0  0.5
vt 0.25 0.5
vt 0.5  0.5
vt 0.75 0.5
vt 0.25 0.75
vt 0.5  0.75
vt 0.25 1.0
vt 0.5  1.0

f  1/11/2  7/14/2  5/12/2
f  1/11/2  3/13/2  7/14/2 
f  1/7/6  4/4/6  3/3/6 
f  1/7/6  2/8/6  4/4/6 
f  3/1/3  8/5/3  7/2/3 
f  3/1/3  4/4/3  8/5/3 
f  5/10/5  7/6/5  8/5/5 
f  5/10/5  8/5/5  6/9/5 
f  1/11/4  5/12/4  6/9/4 
f  1/11/4  6/9/4  2/8/4 
f  2/8/1  6/9/1  8/5/1 
f  2/8/1  8/5/1  4/4/1 

and how i load my VBOs :

glGenVertexArrays(1, &vao);
        glBindVertexArray(vao);

    // Create a Vertex Buffer Object and copy the vertex data to it
        glGenBuffers(1, &position_array_buffer);
        glBindBuffer(GL_ARRAY_BUFFER, position_array_buffer);
        glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(glm::vec3), &vertices[0], GL_STATIC_DRAW);


    // Create an element array
        glGenBuffers(1, &elements_array_buffer);
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elements_array_buffer);
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size()*sizeof(GLuint), &indices[0], GL_STATIC_DRAW);

        glGenBuffers(1, &normal_array_buffer);
        glBindBuffer(GL_ARRAY_BUFFER, normal_array_buffer);
        glBufferData(GL_ARRAY_BUFFER, normals.size() * sizeof(glm::vec3), &normals[0], GL_STATIC_DRAW);

        if (textured) {
            texture = new sf::Texture();
        if(!texture->loadFromFile("textures/uv.jpeg"/*,sf::IntRect(0, 0, 128, 128)*/))
            std::cout << "Error loading texture !!" << std::endl;
            glGenBuffers(1, &color_array_buffer);
            glBindBuffer(GL_ARRAY_BUFFER, color_array_buffer);
            glBufferData(GL_ARRAY_BUFFER, uvs.size() * sizeof(glm::vec3), &uvs[0], GL_STATIC_DRAW);
        } 

Here is the code that concerns rendering the scene :

// Get a handle for our "myTextureSampler" uniform
        GLuint TextureID  = glGetUniformLocation(shaderProgram, "myTextureSampler");
        if(!TextureID)
            cout << "TextureID not found ..." << endl;
        glActiveTexture(GL_TEXTURE0);
        sf::Texture::bind(texture);
        glUniform1i(TextureID, 0);
    // 2nd attribute buffer : UVs
        GLuint vertexUVID = glGetAttribLocation(shaderProgram, "color");
        if(vertexUVID==-1)
            cout << "vertexUVID not found ..." << endl;
        glEnableVertexAttribArray(vertexUVID);
        glBindBuffer(GL_ARRAY_BUFFER, color_array_buffer);
        glVertexAttribPointer(vertexUVID, 2, GL_FLOAT, GL_FALSE, 0, 0);    

        // 3rd attribute buffer : normals
    GLuint vertexNormal_modelspaceID = glGetAttribLocation(shaderProgram, "normal");
    if(!vertexNormal_modelspaceID)
        cout << "vertexNormal_modelspaceID not found ..." << endl;
    glEnableVertexAttribArray(vertexNormal_modelspaceID);
    glBindBuffer(GL_ARRAY_BUFFER, normal_array_buffer);
    glVertexAttribPointer(vertexNormal_modelspaceID, 3, GL_FLOAT, GL_FALSE, 0, 0 );


    // Specify the layout of the vertex data
    GLint posAttrib;
    posAttrib = glGetAttribLocation(shaderProgram, "position");
    // glBindAttribLocation(shaderProgram,posAttrib,"position");
    if(!posAttrib)
        cout << "posAttrib not found ..." << endl;

    glEnableVertexAttribArray(posAttrib);
    glBindBuffer(GL_ARRAY_BUFFER, position_array_buffer);
    glVertexAttribPointer(posAttrib, 3, GL_FLOAT, GL_FALSE, 0, 0);

    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elements_array_buffer);
        // Draw a rectangle from the indices_size/3 triangles using indices_size indices
    glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, 0);
    // glDrawArrays(GL_TRIANGLES,posAttrib,indices.size());

    while ((error = glGetError()) != GL_NO_ERROR) {
        cerr << "OpenGL error: " << error << endl;
    }

I have the feeling that my normals are not loaded properly. Furthermore, I was wondering whether my element array also has to carry index information for the normals and UVs, or whether those are picked up in the classic way without indexing.

EDIT : changed the parser, now the vertices are loading fine but lighting and texture are not applied properly.


Solution

    1. Here:

      normals.reserve(indices.size()); 
      uvs.reserve(indices.size());
      

      does not alter the size, only the capacity (try it yourself: http://ideone.com/FbXtbm), so e.g. this

      glBufferData(GL_ARRAY_BUFFER, /*->*/normals.size() /*<-*/ * sizeof(glm::vec3), &normals[0], GL_STATIC_DRAW);
      

      receives a zero buffer size as an argument.

    2. There's a syntax error in the fragment shader

      in vec3 LightDirection_cameraspace;
      
      /*->*/ out vec4 outColor /*<-*/
      
      // Values that stay constant for the whole mesh.
      

      Add a ";" after outColor.

    3. Your arrays are not properly set up for your glDrawElements call. I'll add some sample code after I've had my breakfast coffee.

      EDIT 11:02

      The cube has 8 vertices, and to draw it properly, you need 3 normals for each. (For simplicity's sake, I did the same with the UVs):

       }
                  indices.push_back(vertexIndex[0]-1);
                  indices.push_back(vertexIndex[1]-1);
                  indices.push_back(vertexIndex[2]-1);
                  uvIndices    .push_back(uvIndex[0]-1);
                  uvIndices    .push_back(uvIndex[1]-1);
                  uvIndices    .push_back(uvIndex[2]-1);
                  normalIndices.push_back(normalIndex[0]-1);
                  normalIndices.push_back(normalIndex[1]-1);
                  normalIndices.push_back(normalIndex[2]-1);
          }else{
          // Probably a comment, eat up the rest of the line
              char stupidBuffer[1000];
              fgets(stupidBuffer, 1000, file);
          }
      }
      
      
      
      #if 1 // EITHER
          vertices.resize(indices.size());
          normals.resize(indices.size());
          uvs.resize(indices.size());
      
          for( unsigned int i=0; i<indices.size(); ++i){
              vertices[i] = temp_vertices[indices[i]];
              normals[i] = temp_normals[normalIndices[i]];
              uvs[i] = temp_uvs[uvIndices[i]];
          }
      #else // OR   
          vertices.reserve(indices.size());
          normals.reserve(indices.size());
          uvs.reserve(indices.size());
      
          for( unsigned int i=0; i<indices.size(); ++i){
              vertices.push_back(temp_vertices[indices[i]]);
              normals.push_back(temp_normals[normalIndices[i]]);
              uvs.push_back(temp_uvs[uvIndices[i]]);
          }
      #endif
      
          struct yield { 
              int i;
              yield() : i(0) {}
              int operator() (){ return i++;}
          };
      
          std::generate(indices.begin(), indices.end(), yield());
      
      
      
          std::clog   << "num vertices: " << vertices.size() << std::endl
                      << "num normals:  " << normals.size()  << std::endl
                      << "num uvs:  "     << uvs.size()      << std::endl
                      << "num indices:  " << indices.size()  << std::endl;
      

      Please note that I also changed something in the loop: I decremented all indices right there. One would not have to unfold all the indices of all the triangles, but this is the simplest way.

    4. I also re-factored your shaders

      #version 150 core
      
      // Per-vertex attributes (names match the app-side attribute bindings)
      in vec2 color;    // actually the UV coordinates
      in vec3 position; // model-space position
      in vec3 normal;   // model-space normal
      
      
      out vec2 UV;
      out vec3 Normal_cameraspace;
      out vec3 EyeDirection_cameraspace;
      out vec3 LightDirection_cameraspace;
      
      uniform mat4 MVP;
      uniform mat4 V;
      uniform mat4 M;
      uniform vec3 LightPosition_worldspace;
      
      
      void main() {
      
          // NOTE: the unused world-space position (wPos) was removed -- the
          // refactor dropped the Position_worldspace output, so computing it
          // here was dead code.
      
          // Vertex position in camera space. The camera sits at the origin
          // there, so the eye direction is simply the negated position.
          vec3 vertexPosition_cameraspace = ( V * M * vec4(position,1)).xyz;
          EyeDirection_cameraspace = -vertexPosition_cameraspace;
      
          // Vector that goes from the vertex to the light, in camera space.
          vec3 LightPosition_cameraspace = ( V * vec4(LightPosition_worldspace,1)).xyz;
          LightDirection_cameraspace = LightPosition_cameraspace - vertexPosition_cameraspace;
      
          // Normal of the vertex, in camera space
      #if 0    
          // Only correct if ModelMatrix does not scale the model ! Use its inverse transpose if not.
          Normal_cameraspace = (V * M * vec4(normal,0)).xyz; 
      #else
          // General case: the inverse transpose of M also handles
          // (non-uniform) scaling correctly.
          Normal_cameraspace = mat3(V) * inverse(transpose(mat3(M))) * normal;
      #endif        
          Normal_cameraspace = normalize(Normal_cameraspace);
          // UV of the vertex. No special space for this one.
          UV = color;
          gl_Position = MVP*vec4(position, 1.0);
      } // void main()
      
      #version 150 core
      
      // Interpolated values from the vertex shader
      in vec2 UV;
      in vec3 Normal_cameraspace;
      in vec3 EyeDirection_cameraspace;
      in vec3 LightDirection_cameraspace;
      
      out vec4 outColor;
      
      const float SHININESS       = 5.0;
      const float AMBIENCE        = 0.1;
      const float SPECULARITY     = 0.3;
      const vec3  LIGHT_COLOR     = vec3(1.0, 1.0, 1.0);
      const float LIGHT_INTENSITY = 300.0;
      
      
      //uniform sampler2D myTextureSampler;
      //uniform vec3 LightPosition_worldspace;
      
      // Lambertian (diffuse) term: cosine between the light direction and the
      // surface normal, clamped to zero for surfaces facing away.
      float lambert_fac(vec3 toLight, vec3 surfNormal) {
          return max(dot(normalize(toLight), normalize(surfNormal)), 0.0);
      }
      
      // Phong (specular) term: cosine between the eye vector and the
      // reflected light vector, raised to the shininess exponent.
      float phong_fac(vec3 toEye, vec3 toLight, vec3 surfNormal, float shiny) {
          vec3 e = normalize(toEye);
          vec3 l = normalize(toLight);
          vec3 n = normalize(surfNormal);
          vec3 r = normalize(reflect(-l, n));
      
          return pow(max(dot(r, e), 0.0), shiny);
      }
      
      // Standard constant/linear/quadratic distance attenuation.
      float attenuate(float d/*distance*/, float c/*constant*/, 
                          float l/*linear*/, float q/*quadratic*/) {
          return 1.0/(c + l * d + q * d * d);    
      }
      
      void main(){
          // Base colour derived from the UVs so the attribute is not removed
          // by the optimizer (a texture2D fetch would normally go here).
          vec3 baseColor = vec3(UV.s, UV.t, clamp(UV.s + UV.t,0.,1.));
      
          // Material components (struct from the original version inlined).
          vec3 ambientColor  = baseColor * AMBIENCE;
          vec3 diffuseColor  = baseColor;
          vec3 specularColor = baseColor * SPECULARITY;
      
          // Light arriving at the fragment, attenuated quadratically.
          float att   = attenuate(length(LightDirection_cameraspace), 0., 0., 1.);
          vec3  light = LIGHT_COLOR * LIGHT_INTENSITY * att;
      
          // Diffuse contribution
          float dc = lambert_fac(LightDirection_cameraspace, Normal_cameraspace);
      
          // Specular contribution
          float sc = phong_fac(EyeDirection_cameraspace, 
                              LightDirection_cameraspace,
                              Normal_cameraspace, 
                              SHININESS);
      
          outColor = vec4(ambientColor 
                          + diffuseColor * dc * light 
                          + specularColor * sc * light, 1.0);
      } // void main()
      

      And this:

      screenshot

      is what it looks like now