I'm trying to load an MD2 model, but I can't seem to get the vertices to draw correctly. I'm not loading UVs or normals at the moment; I just want to see the model appear correctly in a single frame, then take it from there.
Here are my MD2 structures (mostly taken from here):
struct v3
{
    union
    {
        struct
        {
            union { float x; float r; };
            union { float y; float g; };
            union { float z; float b; };
        };
        float At[3];
    };
};
struct md2_header
{
    unsigned int Magic;
    unsigned int Version;
    unsigned int TextureWidth;
    unsigned int TextureHeight;
    unsigned int FrameSize;
    unsigned int NumTextures;
    unsigned int NumVertices;
    unsigned int NumUVs;
    unsigned int NumTrigs;
    unsigned int NumGLCommands;
    unsigned int NumFrames;
    unsigned int OffsetTextures;
    unsigned int OffsetUVs;
    unsigned int OffsetTrigs;
    unsigned int OffsetFrames;
    unsigned int OffsetGLCommands;
    unsigned int OffsetEnd;
};
struct md2_vertex
{
    unsigned char At[3];
    unsigned char NormalIndex;
};
struct md2_frame
{
    float Scale[3];
    float Translate[3];
    char Name[16];
    md2_vertex *Vertices; /* allocated at load time; not part of the on-disk layout */
};
struct md2_skin
{
    char Name[64];
};
struct md2_uv
{
    unsigned short u;
    unsigned short v;
};
struct md2_triangle
{
    unsigned short Vertices[3];
    unsigned short UVs[3];
};
struct md2_model
{
    md2_header Header;
    md2_uv *UVs;
    md2_triangle *Triangles;
    md2_frame *Frames;
    md2_skin *Skins;
    int *GLCommands;
    unsigned int Texture;
    unsigned int VAO, VBO;
};
And here's my simple loading function:
void MD2LoadModel (char *FilePath, md2_model *Model)
{
    FILE *File = fopen (FilePath, "rb");
    if (!File)
    {
        fprintf (stderr, "Error: couldn't open \"%s\"!\n", FilePath);
        return;
    }

#define FREAD(Dest, Type, Count)\
    fread(Dest, sizeof(Type), Count, File)
#define FSEEK(Offset)\
    fseek(File, Offset, SEEK_SET)
#define ALLOC(Type, Count)\
    (Type *)malloc(sizeof(Type) * Count)

    /* Read header */
    FREAD(&Model->Header, md2_header, 1);
    if ((Model->Header.Magic != 844121161) || /* "IDP2" as a little-endian int */
        (Model->Header.Version != 8))
    {
        fprintf (stderr, "Error: bad MD2 version or identifier\n");
        fclose (File);
        return;
    }

    /* Memory allocations */
    Model->Skins = ALLOC(md2_skin, Model->Header.NumTextures);
    Model->UVs = ALLOC(md2_uv, Model->Header.NumUVs);
    Model->Triangles = ALLOC(md2_triangle, Model->Header.NumTrigs);
    Model->Frames = ALLOC(md2_frame, Model->Header.NumFrames);
    Model->GLCommands = ALLOC(int, Model->Header.NumGLCommands);

    /* Read model data */
    FSEEK(Model->Header.OffsetTextures);
    FREAD(Model->Skins, md2_skin, Model->Header.NumTextures);
    FSEEK(Model->Header.OffsetUVs);
    FREAD(Model->UVs, md2_uv, Model->Header.NumUVs);
    FSEEK(Model->Header.OffsetTrigs);
    FREAD(Model->Triangles, md2_triangle, Model->Header.NumTrigs);
    FSEEK(Model->Header.OffsetGLCommands);
    FREAD(Model->GLCommands, int, Model->Header.NumGLCommands);

    /* Read frames */
    FSEEK(Model->Header.OffsetFrames);
    for (unsigned int i = 0; i < Model->Header.NumFrames; i++)
    {
        /* Memory allocation for vertices of this frame */
        Model->Frames[i].Vertices = (md2_vertex *)
            malloc(sizeof(md2_vertex) * Model->Header.NumVertices);

        /* Read frame data */
        FREAD(&Model->Frames[i].Scale, v3, 1);
        FREAD(&Model->Frames[i].Translate, v3, 1);
        FREAD(Model->Frames[i].Name, char, 16);
        FREAD(Model->Frames[i].Vertices, md2_vertex, Model->Header.NumVertices);
    }

    /* Decompress frame 0's vertices into a flat array for the GPU */
    v3 *Vertices = ALLOC(v3, Model->Header.NumVertices);
    md2_frame *Frame = &Model->Frames[0];
    For(u32, i, Model->Header.NumVertices)
    {
        Vertices[i] = V3(
            (Frame->Vertices[i].At[0] * Frame->Scale[0]) + Frame->Translate[0],
            (Frame->Vertices[i].At[1] * Frame->Scale[1]) + Frame->Translate[1],
            (Frame->Vertices[i].At[2] * Frame->Scale[2]) + Frame->Translate[2]);
    }

    glGenBuffers(1, &Model->VBO);
    glBindBuffer(GL_ARRAY_BUFFER, Model->VBO);
    glBufferData(GL_ARRAY_BUFFER, Model->Header.NumVertices * sizeof(v3), Vertices, GL_STATIC_DRAW);

    glGenVertexArrays(1, &Model->VAO);
    glBindVertexArray(Model->VAO);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
    glBindVertexArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    fclose (File);
    free(Vertices);

#undef FSEEK
#undef FREAD
#undef ALLOC
}
I'm only passing the vertex data for now. From my understanding, Header.NumVertices is the number of vertices in each frame, so I'm taking an arbitrary frame (frame 0 in this case) and decompressing its packed vertex data into Vertices.
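For clarity, this is the expansion the per-vertex loop in MD2LoadModel performs; the concrete numbers below are made up for illustration:
/* Each coordinate is stored as one byte (0..255) and expanded with the
   frame's scale and translate: world = byte * scale + translate. */
unsigned char Compressed = 100;               /* from md2_vertex.At       */
float Scale = 0.05f;                          /* from md2_frame.Scale     */
float Translate = -10.0f;                     /* from md2_frame.Translate */
float World = Compressed * Scale + Translate; /* 100 * 0.05 - 10 = -5     */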
Now, I read in a book that Quake has its y and z axes flipped, but swapping them still didn't change much.
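For reference, the swap just means emitting (x, z, y) instead of (x, y, z) when building the GL-side vertices; QuakeX/QuakeY/QuakeZ below are placeholder names for the decompressed coordinates:
/* Quake models are Z-up while my scene is Y-up, so swap the last two */
Vertices[i] = V3(QuakeX, QuakeZ, QuakeY);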
Here's how I'm drawing the model:
GLuint Shader = Data->Shaders.Md2Test;
ShaderUse(Shader);
ShaderSetM4(Shader, "view", &WorldToView);
ShaderSetM4(Shader, "projection", &ViewToProjection);

glBindVertexArray(DrFreak.VAO);
{
    ModelToWorld = m4_Identity;
    ShaderSetM4(Shader, "model", &ModelToWorld);
    glDrawArrays(GL_TRIANGLES, 0, DrFreak.Header.NumVertices);
}
glBindVertexArray(0);
The matrices are calculated in a CameraUpdate function, which I can verify is working correctly because everything else in the scene renders properly except the MD2 model. See:
Everything in yellow is supposed to be the MD2 model.
Here are my shaders (pretty much the same shaders I use for the crates and planes, except there's only one 'in' variable, the position, and no UVs):
#version 330 core
layout (location = 0) in vec3 position;

uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;

void main()
{
    gl_Position = projection * view * model * vec4(position, 1.0f);
}

#version 330 core
out vec4 color;

void main()
{
    color = vec4(1, 1, 0, 1);
}
I've been stuck here for a couple of days. I stepped into the loading code and I seem to be getting valid values. I'm not sure what the issue is. What am I doing wrong/missing?
Any help is appreciated.
I fixed the problem by duplicating the vertices/UVs, pulling them from the triangles data. The original code ignored the triangle index list entirely, so glDrawArrays was just connecting the raw vertex pool in storage order. I didn't have to flip the 't' UV coordinate like many tutorials do. I did switch the y and z coordinates, because they're flipped.
u32 NumVerts = Model->Header.NumTrigs * 3;
u32 NumUVs = NumVerts;
v3 *Vertices = ALLOC(v3, NumVerts);
v2 *UVs = ALLOC(v2, NumUVs);

md2_frame *Frame = &Model->Frames[0]; // render first frame for testing
For(u32, i, Model->Header.NumTrigs)
{
    For(u32, j, 3)
    {
        u32 VertIndex = Model->Triangles[i].Vertices[j];
        // note: y and z are swapped (Quake is Z-up)
        Vertices[i * 3 + j] = V3(
            (Frame->Vertices[VertIndex].At[0] * Frame->Scale[0]) + Frame->Translate[0],
            (Frame->Vertices[VertIndex].At[2] * Frame->Scale[2]) + Frame->Translate[2],
            (Frame->Vertices[VertIndex].At[1] * Frame->Scale[1]) + Frame->Translate[1]);

        u32 UVIndex = Model->Triangles[i].UVs[j];
        UVs[i * 3 + j] = V2(
            Model->UVs[UVIndex].u / (r32)Model->Header.TextureWidth,
            Model->UVs[UVIndex].v / (r32)Model->Header.TextureHeight);
    }
}

glGenVertexArrays(1, &Model->VAO);
glBindVertexArray(Model->VAO);

glGenBuffers(1, &Model->VBO);
glBindBuffer(GL_ARRAY_BUFFER, Model->VBO);
glBufferData(GL_ARRAY_BUFFER, NumVerts * sizeof(v3), Vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);

u32 UVBuffer;
glGenBuffers(1, &UVBuffer);
glBindBuffer(GL_ARRAY_BUFFER, UVBuffer);
glBufferData(GL_ARRAY_BUFFER, NumUVs * sizeof(v2), UVs, GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, 0);

glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
I will probably switch to indexed arrays and glDrawElements eventually, but for my testing purposes glDrawArrays is good enough. If anyone knows of a better way to do all this, feel free to leave a comment.
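In case it helps anyone, here's a rough sketch of what the glDrawElements route could look like. Note that a real deduplication pass would have to key on (vertex index, UV index) pairs, since MD2 indexes positions and UVs separately; the Indices array below is assumed to be filled by such a pass:
u32 NumIndices = Model->Header.NumTrigs * 3;
u32 *Indices = ALLOC(u32, NumIndices);
/* ... fill Indices, one entry per triangle corner, pointing into
   deduplicated vertex/UV arrays keyed on (vertex, UV) pairs ... */

u32 EBO;
glGenBuffers(1, &EBO);
/* bind while the VAO is bound: the element buffer binding is VAO state */
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, NumIndices * sizeof(u32), Indices, GL_STATIC_DRAW);

/* then, at draw time: */
glDrawElements(GL_TRIANGLES, NumIndices, GL_UNSIGNED_INT, 0);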
And there's Dr Freak chillin'