My app displays an image in full screen using the OpenGL shader code shown below. The vertex shader I use here was copied from somewhere. Can anyone explain why it uses vTexCoord = (a_position.xy+1)/2;
? When I try vTexCoord = a_position.xy
instead, my OpenGL output is split into four rectangles and only the top-right quarter shows the image; the other three quarters appear blurred. What do I have to change to make it work with vTexCoord = a_position.xy
?
The relevant functions from the project are shown below. Please check them and help me correct the code.
float[] vertices = {
    // Left bottom triangle
    -1f, -1f, 0f,
     1f, -1f, 0f,
     1f,  1f, 0f,
    // Right top triangle
     1f,  1f, 0f,
    -1f,  1f, 0f,
    -1f, -1f, 0f
};
private void CreateShaders()
{
    /*********** Vertex shader ***********/
    vertShader = GL.CreateShader(ShaderType.VertexShader);
    GL.ShaderSource(vertShader, @"
        attribute vec3 a_position;
        varying vec2 vTexCoord;
        void main() {
            vTexCoord = (a_position.xy+1)/2;
            gl_Position = vec4(a_position, 1);
        }");
    GL.CompileShader(vertShader);

    /*********** Fragment shader ***********/
    fragShader = GL.CreateShader(ShaderType.FragmentShader);
    GL.ShaderSource(fragShader, @"
        precision highp float;
        uniform sampler2D sTexture;
        varying vec2 vTexCoord;
        void main() {
            vec4 color = texture2D(sTexture, vTexCoord);
            gl_FragColor = color;
        }");
    GL.CompileShader(fragShader);
}
private void InitBuffers()
{
    buffer = GL.GenBuffer();
    positionLocation = GL.GetAttribLocation(program, "a_position");
    positionLocation1 = GL.GetUniformLocation(program, "sTexture");
    GL.EnableVertexAttribArray(positionLocation);
    GL.BindBuffer(BufferTarget.ArrayBuffer, buffer);
    GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(vertices.Length * sizeof(float)), vertices, BufferUsageHint.StaticDraw);
    GL.VertexAttribPointer(positionLocation, 3, VertexAttribPointerType.Float, false, 0, 0);
}
public void DrawImage(int image)
{
    GL.Viewport(new Rectangle(0, 0, ScreenWidth, ScreenHeight));

    GL.MatrixMode(MatrixMode.Projection);
    GL.PushMatrix();
    GL.LoadIdentity();
    //GL.Ortho(0, 1920, 0, 1080, 0, 1);
    GL.MatrixMode(MatrixMode.Modelview);
    GL.PushMatrix();
    GL.LoadIdentity();

    GL.Disable(EnableCap.Lighting);
    GL.Enable(EnableCap.Texture2D);
    GL.ActiveTexture(TextureUnit.Texture0);
    GL.BindTexture(TextureTarget.Texture2D, image);
    GL.Uniform1(positionLocation1, 0);

    GL.Begin(PrimitiveType.Quads);
    GL.TexCoord2(0, 1);
    GL.Vertex3(0, 0, 0);
    GL.TexCoord2(0, 0);
    GL.Vertex3(1920, 0, 0);
    GL.TexCoord2(1, 1);
    GL.Vertex3(1920, 1080, 0);
    GL.TexCoord2(1, 0);
    GL.Vertex3(0, 1080, 0);
    GL.End();

    RunShaders();

    GL.Disable(EnableCap.Texture2D);
    GL.PopMatrix();
    GL.MatrixMode(MatrixMode.Projection);
    GL.PopMatrix();
    GL.MatrixMode(MatrixMode.Modelview);

    glControl1.SwapBuffers();
}
private void RunShaders()
{
    GL.UseProgram(program);
    GL.DrawArrays(PrimitiveType.Triangles, 0, vertices.Length / 3);
}
Can anyone explain why vTexCoord = (a_position.xy+1)/2;
is used here?
The vertex coordinates in your example are in the range [-1, 1] for both the x and the y component. This matches normalized device space. Normalized device space is a cube from the left-lower-front corner (-1, -1, -1) to the right-top-back corner (1, 1, 1); this is the area which is "visible". This area is mapped to the viewport.
As a result, the vertex coordinates in your example form a rectangle that covers the entire viewport.
If the texture coordinates should map the entire texture onto the quad, then the texture coordinates (u, v) have to be in the range [0, 1], where (0, 0) is the lower left of the texture and (1, 1) is the upper right.
See also How do OpenGL texture coordinates work?
So the x and y components of a_position
have to be mapped from the range [-1, 1] to the range [0, 1] to be usable as uv coordinates for the texture lookup:
u = (a_position.x + 1) / 2
v = (a_position.y + 1) / 2
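As a quick check, evaluating this formula at the four corner positions of the quad gives exactly the texture corners:
(-1, -1)  ->  (0, 0)   // lower left
( 1, -1)  ->  (1, 0)   // lower right
( 1,  1)  ->  (1, 1)   // upper right
(-1,  1)  ->  (0, 1)   // upper left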
What change do I have to make so that it works with vTexCoord = a_position.xy
?
This is not possible with the position attribute alone, but you can add a separate texture coordinate attribute, which is the common approach:
attribute vec3 a_position;
attribute vec2 a_texture;
varying vec2 vTexCoord;
void main() {
    vTexCoord = a_texture;
    gl_Position = vec4(a_position, 1);
}
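The fragment shader from the question can stay as it is, since it already samples sTexture with the interpolated vTexCoord.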
float[] vertices = {
    //   x    y    z    u    v
    // Left bottom triangle
    -1f, -1f, 0f,  0f, 0f,
     1f, -1f, 0f,  1f, 0f,
     1f,  1f, 0f,  1f, 1f,
    // Right top triangle
     1f,  1f, 0f,  1f, 1f,
    -1f,  1f, 0f,  0f, 1f,
    -1f, -1f, 0f,  0f, 0f
};
buffer = GL.GenBuffer();
GL.BindBuffer(BufferTarget.ArrayBuffer, buffer);
GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(vertices.Length * sizeof(float)), vertices, BufferUsageHint.StaticDraw);

positionLocation = GL.GetAttribLocation(program, "a_position");
textureLocation = GL.GetAttribLocation(program, "a_texture");
GL.EnableVertexAttribArray(positionLocation);
GL.EnableVertexAttribArray(textureLocation);

int stride = sizeof(float) * 5;   // 5 floats per vertex (x, y, z, u, v)
int offsetUV = sizeof(float) * 3; // u and v are the 4th and 5th float of each vertex
GL.VertexAttribPointer(positionLocation, 3, VertexAttribPointerType.Float, false, stride, 0);
GL.VertexAttribPointer(textureLocation, 2, VertexAttribPointerType.Float, false, stride, (IntPtr)offsetUV);
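Note that with this interleaved layout there are now 5 floats per vertex, so the vertex count passed to GL.DrawArrays has to be vertices.Length / 5 rather than vertices.Length / 3. A minimal sketch of the adjusted draw call, assuming the same program and vertices fields as in the question:
private void RunShaders()
{
    GL.UseProgram(program);
    // 5 floats per vertex (x, y, z, u, v), so divide the float count by 5
    GL.DrawArrays(PrimitiveType.Triangles, 0, vertices.Length / 5);
}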