I'm trying to render a video through OpenGL in GTK3 on Linux. These vertex and fragment shaders were used with success on Qt; in fact, all the OpenGL function calls are the same as in working Qt examples. Does anyone know why it doesn't work here? Is it possible that GLEW coordinates are different from the coordinates in Qt's OpenGL?
The video is in YUV420P format, which is why the fragment shader does the matrix multiplication. Is it possible that my fragment coordinates are wrong?
Anyway, here's the video:
My Vertex Shader:
#version 130
attribute vec4 vertexIn;
attribute vec2 textureIn;
varying vec2 textureOut;

void main(void)
{
    gl_Position = vertexIn;
    textureOut = textureIn;
}
My Fragment Shader:
#version 130
varying vec2 textureOut;
uniform sampler2D tex_y;
uniform sampler2D tex_u;
uniform sampler2D tex_v;

void main(void)
{
    vec3 yuv;
    vec3 rgb;
    yuv.x = texture2D(tex_y, textureOut).r;
    yuv.y = texture2D(tex_u, textureOut).r - 0.5;
    yuv.z = texture2D(tex_v, textureOut).r - 0.5;
    rgb = mat3(1.0,      1.0,      1.0,
               0.0,     -0.39465,  2.03211,
               1.13983, -0.58060,  0.0) * yuv;
    gl_FragColor = vec4(rgb, 1.0);
}
My rendering code:
static const GLfloat ver[] = {
    -1.0f, -1.0f,
     1.0f, -1.0f,
    -1.0f,  1.0f,
     1.0f,  1.0f
};

static const GLfloat tex[] = {
    0.0f, 1.0f,
    1.0f, 1.0f,
    0.0f, 0.0f,
    1.0f, 0.0f
};
void OpenGLArea::init()
{
    std::cout << "OpenGLArea init" << std::endl;
    set_size_request(640, 360);
    Singleton::instance()->getStream("cam1").mediaStream->ffmpegDecoder->setVideoReceiver(this);
}

void OpenGLArea::receiveVideo(unsigned char **videoBuffer, int frameWidth, int frameHeight)
{
    this->frameWidth = frameWidth;
    this->frameHeight = frameHeight;
    //Before the first render, the data buffers haven't been allocated yet
    if (!firstFrameReceived)
    {
        buffer[0] = new unsigned char[frameWidth * frameHeight];     //Y
        buffer[1] = new unsigned char[frameWidth * frameHeight / 4]; //U
        buffer[2] = new unsigned char[frameWidth * frameHeight / 4]; //V
        firstFrameReceived = true;
    }
    else
    {
        memcpy(buffer[0], videoBuffer[0], frameWidth * frameHeight);
        memcpy(buffer[1], videoBuffer[1], frameWidth * frameHeight / 4);
        memcpy(buffer[2], videoBuffer[2], frameWidth * frameHeight / 4);
    }
    //glDraw();
}
void OpenGLArea::glInit()
{
    int frameWidth = 640;
    int frameHeight = 360;
    glClearColor(0.0f, 0.0f, 0.4f, 0.0f);
    Shader vertex_shader(ShaderType::Vertex, "vertex.shader");
    Shader fragment_shader(ShaderType::Fragment, "fragment.shader");
    program = new Program();
    program->attach_shader(vertex_shader);
    program->attach_shader(fragment_shader);
    program->link();
    glGenTextures(3, texs); //TODO: delete texture
    //Y
    glBindTexture(GL_TEXTURE_2D, texs[0]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, frameWidth, frameHeight, 0, GL_RED, GL_UNSIGNED_BYTE, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    //U
    glBindTexture(GL_TEXTURE_2D, texs[1]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, frameWidth / 2, frameHeight / 2, 0, GL_RED, GL_UNSIGNED_BYTE, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    //V
    glBindTexture(GL_TEXTURE_2D, texs[2]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, frameWidth / 2, frameHeight / 2, 0, GL_RED, GL_UNSIGNED_BYTE, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
}
void OpenGLArea::glDraw()
{
    program->use();
    glVertexAttribPointer(A_VER, 2, GL_FLOAT, 0, 0, ver);
    glEnableVertexAttribArray(A_VER);
    glVertexAttribPointer(T_VER, 2, GL_FLOAT, 0, 0, tex);
    glEnableVertexAttribArray(T_VER);
    unis[0] = glGetAttribLocation(program->get_id(), "tex_y");
    unis[1] = glGetAttribLocation(program->get_id(), "tex_u");
    unis[2] = glGetAttribLocation(program->get_id(), "tex_v");
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, texs[0]);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, frameWidth, frameHeight, GL_RED, GL_UNSIGNED_BYTE, buffer[0]);
    glUniform1i(unis[0], 0);
    glActiveTexture(GL_TEXTURE0 + 1);
    glBindTexture(GL_TEXTURE_2D, texs[1]);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, frameWidth / 2, frameHeight / 2, GL_RED, GL_UNSIGNED_BYTE, buffer[1]);
    glUniform1i(unis[1], 1);
    glActiveTexture(GL_TEXTURE0 + 2);
    glBindTexture(GL_TEXTURE_2D, texs[2]);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, frameWidth / 2, frameHeight / 2, GL_RED, GL_UNSIGNED_BYTE, buffer[2]);
    glUniform1i(unis[2], 2);
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}
class GLWindow : public Gtk::Window
{
public:
    GLWindow()
    {
        vbox = new Gtk::VBox;
        drawing_area = new OpenGLArea();
        vbox->pack_start(*drawing_area, true, true);
        add(*vbox);
    }

private:
    Gtk::Button *button;
    Gtk::VBox *vbox;
    OpenGLArea *drawing_area;
};
Also, I'm only getting image updates when I resize the window or refocus it. It's possible that I'm forgetting to call some function when the video is updated. Does anyone know which function that is?
PS: OpenGLArea is a subclass of Gtk::DrawingArea.
UPDATE:
I just noticed that in the lines
unis[0] = glGetAttribLocation(program->get_id(), "tex_y");
unis[1] = glGetAttribLocation(program->get_id(), "tex_u");
unis[2] = glGetAttribLocation(program->get_id(), "tex_v");
unis[i] always has the same value, 4294967295 (that is, -1 stored into an unsigned variable), so glGetAttribLocation isn't finding those names.
GLEW is a library used to retrieve the function pointers to OpenGL functions; it's needed for OpenGL versions above 1.1. So don't think of GLEW as the culprit of your vertices issue. If you need to change the order of the vertices, that's due to the winding order. The order can also be the cause of a texture being drawn upside-down.
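For example, if the frame comes out vertically mirrored, flipping the V component of the texture coordinates is usually enough. A minimal sketch based on the tex array from your question (the tex_flipped name is just illustrative):
// Hypothetical variant of the original tex[] array with the V coordinate flipped,
// so the first row of the decoded frame maps to the top of the quad instead of the bottom.
static const GLfloat tex_flipped[] = {
    0.0f, 0.0f, // was 0.0f, 1.0f
    1.0f, 0.0f, // was 1.0f, 1.0f
    0.0f, 1.0f, // was 0.0f, 0.0f
    1.0f, 1.0f  // was 1.0f, 0.0f
};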
glGetAttribLocation is used for attributes. In your vertex shader these are vertexIn and textureIn. For uniforms (your tex_XXX samplers) you must use glGetUniformLocation.
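Roughly, the lookup in glDraw() then becomes something like the sketch below (assuming unis is signed, e.g. GLint, so the -1 error value stays visible, and that program->use() has already been called as in your code):
// Sampler uniforms are looked up with glGetUniformLocation, not glGetAttribLocation.
unis[0] = glGetUniformLocation(program->get_id(), "tex_y");
unis[1] = glGetUniformLocation(program->get_id(), "tex_u");
unis[2] = glGetUniformLocation(program->get_id(), "tex_v");
// Each sampler is then bound to its texture unit exactly as before.
glUniform1i(unis[0], 0); // GL_TEXTURE0
glUniform1i(unis[1], 1); // GL_TEXTURE0 + 1
glUniform1i(unis[2], 2); // GL_TEXTURE0 + 2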
For the resizing issue, this question may be useful.
In short, you must connect a callback to the "configure-event" signal and call glViewport there.
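A minimal sketch of what that could look like in your OpenGLArea (Gtk::DrawingArea) subclass, assuming the GL context is made current before the GL call; the override follows gtkmm's on_configure_event convention:
// Keep the GL viewport in sync with the widget size whenever it changes.
bool OpenGLArea::on_configure_event(GdkEventConfigure *event)
{
    // The GL context must be current here before issuing any GL calls.
    glViewport(0, 0, event->width, event->height);
    return Gtk::DrawingArea::on_configure_event(event);
}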