push as is, half baked broken twn_models.c

This commit is contained in:
veclavtalica 2025-02-14 19:51:34 +03:00
parent 0df0a9226f
commit f81c583319
10 changed files with 460 additions and 49 deletions


@ -97,7 +97,7 @@ set(TWN_NONOPT_SOURCE_FILES
src/twn_filewatch.c src/twn_filewatch_c.h
src/twn_filewatch.c src/twn_filewatch_c.h
src/twn_timer.c src/twn_timer_c.h
src/twn_workers.c src/twn_workers_c.h
src/twn_workers.c src/twn_workers_c.h
src/rendering/twn_draw.c src/rendering/twn_draw_c.h
src/rendering/twn_quads.c
@ -108,7 +108,7 @@ set(TWN_NONOPT_SOURCE_FILES
src/rendering/twn_billboards.c
src/rendering/twn_circles.c
src/rendering/twn_skybox.c
src/rendering/twn_model.c
src/rendering/twn_models.c
)
set(TWN_SOURCE_FILES


@ -197,7 +197,9 @@ static void ingame_tick(State *state) {
input_action("mouse_capture_toggle", "ESCAPE");
input_action("toggle_camera_mode", "C");
draw_model("models/test.obj", (Vec3){0}, (Vec3){0,0,1}, (Vec3){1,1,1});
// draw_model("models/test.obj", (Vec3){0}, (Vec3){0,0,1}, (Vec3){1.f / 64,1.f / 64,1.f / 64});
// draw_model("models/test2.obj", (Vec3){0}, (Vec3){0,0,1}, (Vec3){1.f / 64,1.f / 64,1.f / 64});
// draw_model("models/bunny.obj", (Vec3){0}, (Vec3){0,0,1}, (Vec3){4.,4.,4.});
if (scn->mouse_captured) {
const float sensitivity = 0.4f * (float)DEG2RAD; /* TODO: put this in a better place */


@ -415,7 +415,9 @@ static void render_space(void) {
void render(void) {
models_update_pre_textures();
textures_update_atlas(&ctx.texture_cache);
models_update_post_textures();
/* fit rendering context onto the resizable screen */
if (ctx.window_size_has_changed) {
@ -471,7 +473,6 @@ void draw_camera(Vec3 position, Vec3 direction, Vec3 up, float fov, float zoom)
/* TODO: https://stackoverflow.com/questions/62493770/how-to-add-roll-in-camera-class */
/* TODO: check for NaNs and alike */
/* TODO: call draw_camera() instead, to reuse the code */
DrawCameraFromPrincipalAxesResult draw_camera_from_principal_axes(Vec3 position,
float roll,


@ -228,6 +228,8 @@ typedef struct ElementIndexedBillboard {
} ElementIndexedBillboard;
/* state */
bool render_init(void);
/* renders the background, then the primitives in all render queues */
@ -236,26 +238,20 @@ void render(void);
/* clears all render queues */
void render_clear(void);
/* fills two existing arrays with the geometry data of a circle */
/* the size of indices must be at least 3 times the number of vertices */
void create_circle_geometry(Vec2 position,
float radius,
size_t num_vertices,
Vec2 vertices[]);
void setup_viewport(int x, int y, int width, int height);
struct QuadBatch {
size_t size; /* how many primitives are in current batch */
TextureKey texture_key;
TextureMode mode; /* how color should be applied */
bool constant_colored; /* whether colored batch is uniformly colored */
bool repeat; /* whether repeat is needed */
bool textured;
} collect_quad_batch(const Primitive2D primitives[], size_t len);
void render_quad_batch(const Primitive2D primitives[], struct QuadBatch batch);
struct QuadBatch collect_sprite_batch(const Primitive2D primitives[], size_t len);
struct QuadBatch collect_rect_batch(const Primitive2D primitives[], size_t len);
void render_sprite_batch(const Primitive2D primitives[], struct QuadBatch batch);
void render_rect_batch(const Primitive2D primitives[], struct QuadBatch batch);
void clear_draw_buffer(void);
void finally_clear_draw_buffer(DeferredCommandClear command);
void swap_buffers(void);
void set_depth_range(double low, double high);
void start_render_frame(void);
void end_render_frame(void);
void finally_draw_command(DeferredCommandDraw command);
void issue_deferred_draw_commands(void);
/* text */
@ -279,21 +275,32 @@ void delete_vertex_buffer(VertexBuffer buffer);
void specify_vertex_buffer(VertexBuffer buffer, void const *data, size_t bytes);
/* uses the buffer mapping feature present in OpenGL 1.5 */
VertexBufferBuilder build_vertex_buffer(VertexBuffer buffer, size_t bytes);
void finish_vertex_builder(VertexBufferBuilder *builder);
/* state */
/* 2d */
void setup_viewport(int x, int y, int width, int height);
/* fills two existing arrays with the geometry data of a circle */
/* the size of indices must be at least 3 times the number of vertices */
void create_circle_geometry(Vec2 position,
float radius,
size_t num_vertices,
Vec2 vertices[]);
void clear_draw_buffer(void);
void finally_clear_draw_buffer(DeferredCommandClear command);
void swap_buffers(void);
void set_depth_range(double low, double high);
struct QuadBatch {
size_t size; /* how many primitives are in current batch */
TextureKey texture_key;
TextureMode mode; /* how color should be applied */
bool constant_colored; /* whether colored batch is uniformly colored */
bool repeat; /* whether repeat is needed */
bool textured;
} collect_quad_batch(const Primitive2D primitives[], size_t len);
void render_quad_batch(const Primitive2D primitives[], struct QuadBatch batch);
struct QuadBatch collect_sprite_batch(const Primitive2D primitives[], size_t len);
struct QuadBatch collect_rect_batch(const Primitive2D primitives[], size_t len);
void render_sprite_batch(const Primitive2D primitives[], struct QuadBatch batch);
void render_rect_batch(const Primitive2D primitives[], struct QuadBatch batch);
VertexBuffer get_quad_element_buffer(void);
@ -318,30 +325,29 @@ void push_quad_payload_to_vertex_buffer_builder(struct QuadBatch batch,
Vec2 uv0, Vec2 uv1, Vec2 uv2, Vec2 uv3,
Color color);
void finally_draw_text(FontData const *font_data,
size_t len,
Color color,
VertexBuffer buffer);
/* 3d */
void finally_draw_uncolored_space_traingle_batch(MeshBatch const *batch,
TextureKey texture_key);
void finally_draw_billboard_batch(MeshBatch const *batch,
TextureKey texture_key);
void finally_draw_text(FontData const *font_data,
size_t len,
Color color,
VertexBuffer buffer);
void render_skybox(void);
void finally_render_skybox(DeferredCommandDrawSkybox);
void start_render_frame(void);
void end_render_frame(void);
void finally_draw_command(DeferredCommandDraw command);
void issue_deferred_draw_commands(void);
bool model_load_workers_thread(void);
bool models_load_workers_finished(void);
bool models_load_workers_thread(void);
void finally_draw_models(void);
void free_model_cache(void);
void model_state_deinit(void);
void models_state_init(void);
void models_state_deinit(void);
void models_update_pre_textures(void);
void models_update_post_textures(void);
#endif

src/rendering/twn_models.c (new file, 396 lines)

@ -0,0 +1,396 @@
#include "twn_draw_c.h"
#include "twn_draw.h"
#include "twn_engine_context_c.h"
#include "twn_util.h"
#include "twn_workers_c.h"
#include "twn_textures_c.h"
#define FAST_OBJ_IMPLEMENTATION
#define FAST_OBJ_REALLOC SDL_realloc
#define FAST_OBJ_FREE SDL_free
#include <fast_obj.h>
#include <stb_ds.h>
#include <physfs.h>
#include <physfsrwops.h>
#include <SDL2/SDL.h>
/* TODO: it might make sense to have a separate path for really small models, collecting them together */
static struct ModelCacheItem {
char *key;
struct ModelCacheItemValue {
/* vertex data (UncoloredSpaceTriangle) that the index buffers point into */
VertexBuffer vertices;
/* array of uint16_t or uint32_t, depending on length */
/* populated in such a way that shared textures are combined into a continuous range */
VertexBuffer *indices;
// /* note: this whole scheme only works without taking normals into account, but it's quite fast */
// struct ModelCacheIndexRange {
// Rect srcrect;
// size_t offset;
// size_t length;
// TextureKey texture;
// } *ranges;
/* cached base command, modified for ranges */
DeferredCommand *commands;
} value;
} *model_cache;
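/* keyed by model path via stb_ds' string hash (shput/shgetp_null); the */
/* strdup'd key pointer doubles as the handle stored in draw commands, see draw_model() */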
/* TODO: store index to model cache instead */
static struct ModelDrawCommand {
char *model;
Vec3 position;
Vec3 rotation;
Vec3 scale;
} *model_draw_commands;
/* deferred queue of model files to load from worker threads */
static SDL_mutex *model_load_mutex;
static struct ModelLoadRequest {
char const *path;
fastObjMesh *mesh;
enum {
/* not yet started, only path is available */
MODEL_LOAD_REQUEST_WAITING,
/* initial load of data, unrelated to graphics state and thus applicable to running in worker threads */
MODEL_LOAD_REQUEST_LOADING,
/* mesh is loaded and awaits preparation and upload to the GPU */
MODEL_LOAD_REQUEST_LOADED,
} stage;
} *model_load_queue;
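/* intended flow, as a sketch (single producer, any number of workers): */
/*   main thread: draw_model() appends a WAITING entry and posts workers_job_semaphore */
/*   worker: claims WAITING -> LOADING under the mutex, parses the .obj, marks it LOADED */
/*   main thread: models_update_pre_textures() waits until everything is LOADED, then uploads */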
static bool model_load_initialized;
/* use streaming via callbacks to reduce memory congestion */
static void model_load_callback_close(void *handle, void *udata) {
(void)udata;
((SDL_RWops *)handle)->close(handle);
}
static void *model_load_callback_open(const char *path, void *udata) {
(void)udata;
return PHYSFSRWOPS_openRead(path);
}
static size_t model_load_callback_read(void *handle, void *dst, size_t bytes, void *udata) {
(void)udata;
return ((SDL_RWops *)handle)->read(handle, dst, 1, bytes);
}
static unsigned long model_load_callback_size(void *handle, void *udata) {
(void)udata;
return ((SDL_RWops *)handle)->size(handle);
}
/* once this returns true it's safe to access everything without the lock, provided no public API can be called concurrently */
bool models_load_workers_finished(void) {
bool result = true;
SDL_LockMutex(model_load_mutex);
for (size_t i = 0; i < arrlenu(model_load_queue); ++i) {
if (model_load_queue[i].stage != MODEL_LOAD_REQUEST_LOADED) {
result = false;
break;
}
}
SDL_UnlockMutex(model_load_mutex);
return result;
}
/* entry point for workers, polled every time a job semaphore is posted */
/* returns false if there was nothing to do */
bool models_load_workers_thread(void) {
/* attempt to grab something to work on */
char const *request_path = NULL;
ssize_t queue_index = -1;
SDL_LockMutex(model_load_mutex);
for (size_t i = 0; i < arrlenu(model_load_queue); ++i) {
if (model_load_queue[i].stage == MODEL_LOAD_REQUEST_WAITING) {
request_path = model_load_queue[i].path;
queue_index = i;
model_load_queue[i].stage = MODEL_LOAD_REQUEST_LOADING;
break;
}
}
SDL_UnlockMutex(model_load_mutex);
/* nothing to do, bail */
if (queue_index == -1)
return false;
fastObjCallbacks const callbacks = {
.file_close = model_load_callback_close,
.file_open = model_load_callback_open,
.file_read = model_load_callback_read,
.file_size = model_load_callback_size
};
/* TODO: would be nice if we could start dependency texture load immediately */
fastObjMesh *const mesh = fast_obj_read_with_callbacks(request_path, &callbacks, NULL);
SDL_LockMutex(model_load_mutex);
model_load_queue[queue_index].mesh = mesh;
model_load_queue[queue_index].stage = MODEL_LOAD_REQUEST_LOADED;
SDL_UnlockMutex(model_load_mutex);
return true;
}
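/* this is polled from the shared worker loop (see the src/twn_workers.c hunk below): */
/*     if (models_load_workers_thread()) */
/*         continue; */
/* returning false lets a worker fall through to other job kinds, e.g. texture loads */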
void draw_model(const char *model,
Vec3 position,
Vec3 rotation,
Vec3 scale)
{
/* TODO: make this all work. */
SDL_assert_always(false);
/* if model is missing, queue it up for loading */
struct ModelCacheItem const *item;
/* reuse the key from model_cache */
char *modelcopy;
if (!(item = shgetp_null(model_cache, model))) {
modelcopy = SDL_strdup(model);
shput(model_cache, modelcopy, (struct ModelCacheItemValue) {0});
SDL_LockMutex(model_load_mutex);
struct ModelLoadRequest const request = {
.stage = MODEL_LOAD_REQUEST_WAITING,
.path = modelcopy,
};
arrpush(model_load_queue, request);
SDL_UnlockMutex(model_load_mutex);
SDL_SemPost(workers_job_semaphore);
} else
modelcopy = item->key;
struct ModelDrawCommand const command = {
.model = modelcopy,
.position = position,
.rotation = rotation,
.scale = scale
};
arrpush(model_draw_commands, command);
}
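/* usage from game code (mirroring the ingame_tick call above): */
/*     draw_model("models/test.obj", (Vec3){0}, (Vec3){0,0,1}, (Vec3){1,1,1}); */
/* the first call for a given path only schedules the load; the cached command */
/* list is replayed by finally_draw_models() on the frames that follow */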
/* prepare vertex buffers before textures are ready */
void models_update_pre_textures(void) {
/* TODO: instead of waiting for all we could start uploading when there's something to upload */
/* it's unlikely to happen, but could improve the worst cases */
while (!models_load_workers_finished())
SDL_Delay(1);
/* TODO: it might be better to parallelize this part by sending buffer mappings to workers */
for (size_t i = 0; i < arrlenu(model_load_queue); ++i) {
fastObjMesh *const mesh = model_load_queue[i].mesh;
SDL_assert(mesh && model_load_queue[i].stage == MODEL_LOAD_REQUEST_LOADED);
struct ModelCacheItem *const item = shgetp(model_cache, model_load_queue[i].path);
/* calculate required vertex and index buffers */
/* texture-specific index buffers, to be merged later */
uint32_t **indices = NULL;
arrsetlen(indices, mesh->texture_count);
SDL_memset(indices, 0, mesh->texture_count * sizeof (uint32_t *));
/* vertices are shared for all subcommands */
struct ModelVertex {
Vec3 position;
Vec2 uv;
} *vertices = NULL;
for (unsigned int o = 0; o < mesh->object_count; ++o) {
/* we assume that vertices are only shared within the same object, */
/* which allows us to keep the hash table small in most cases */
/* it should work great for quake style brush based models */
struct ModelVertexIndexItem {
struct ModelVertexIndexItemKey {
uint32_t vertex_index;
uint32_t uv_index;
} key;
uint32_t value; /* index to vertex */
} *merge_hash = NULL;
fastObjGroup const *const object = &mesh->objects[o];
size_t idx = 0;
for (unsigned int f = 0; f < object->face_count; ++f) {
unsigned int const fv = mesh->face_vertices[object->face_offset + f];
unsigned int const mi = mesh->face_materials[object->face_offset + f];
/* TODO: handle missing */
fastObjMaterial const *const m = mesh->materials ? &mesh->materials[mi] : NULL;
/* unwrap polygon fans into triangles; the first point is reused by every triangle in the fan */
fastObjIndex const i0 = mesh->indices[object->index_offset + idx];
ptrdiff_t i0_hash = hmgeti(merge_hash, ((struct ModelVertexIndexItemKey) { i0.p, i0.t }));
if (i0_hash == -1) {
hmput(merge_hash, ((struct ModelVertexIndexItemKey) { i0.p, i0.t }), arrlenu(vertices));
arrpush(vertices, ((struct ModelVertex) {
(Vec3) { mesh->positions[3 * i0.p + 0] / 64, mesh->positions[3 * i0.p + 1] / 64, mesh->positions[3 * i0.p + 2] / 64 },
(Vec2) { mesh->texcoords[2 * i0.t + 0], mesh->texcoords[2 * i0.t + 1] }
}));
i0_hash = hmlen(merge_hash) - 1;
// i0_hash = arrlenu(vertices) - 1;
}
/* the remaining fan points are consumed through a window shifting by one */
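/* e.g. a quad face (fv == 4) with fan points {0,1,2,3} emits triangles (0,1,2) and (0,2,3) */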
for (unsigned int t = 0; t < fv - 2; ++t) {
fastObjIndex const i1 = mesh->indices[object->index_offset + idx + 1 + t];
ptrdiff_t i1_hash = hmgeti(merge_hash, ((struct ModelVertexIndexItemKey) { i1.p, i1.t }));
if (i1_hash == -1) {
hmput(merge_hash, ((struct ModelVertexIndexItemKey) { i1.p, i1.t }), arrlenu(vertices));
arrpush(vertices, ((struct ModelVertex) {
(Vec3) { mesh->positions[3 * i1.p + 0] / 64, mesh->positions[3 * i1.p + 1] / 64, mesh->positions[3 * i1.p + 2] / 64 },
(Vec2) { mesh->texcoords[2 * i1.t + 0], mesh->texcoords[2 * i1.t + 1] }
}));
i1_hash = hmlen(merge_hash) - 1;
// i1_hash = arrlenu(vertices) - 1;
}
fastObjIndex const i2 = mesh->indices[object->index_offset + idx + 2 + t];
ptrdiff_t i2_hash = hmgeti(merge_hash, ((struct ModelVertexIndexItemKey) { i2.p, i2.t }));
if (i2_hash == -1) {
hmput(merge_hash, ((struct ModelVertexIndexItemKey) { i2.p, i2.t }), arrlenu(vertices));
arrpush(vertices, ((struct ModelVertex) {
(Vec3) { mesh->positions[3 * i2.p + 0] / 64, mesh->positions[3 * i2.p + 1] / 64, mesh->positions[3 * i2.p + 2] / 64 },
(Vec2) { mesh->texcoords[2 * i2.t + 0], mesh->texcoords[2 * i2.t + 1] }
}));
i2_hash = hmlen(merge_hash) - 1;
// i2_hash = arrlenu(vertices) - 1;
}
arrpush(indices[m->map_Kd], (uint32_t)i0_hash);
arrpush(indices[m->map_Kd], (uint32_t)i1_hash);
arrpush(indices[m->map_Kd], (uint32_t)i2_hash);
}
idx += fv;
}
hmfree(merge_hash);
}
if (mesh->color_count != 0)
log_warn("TODO: color in models isn't yet supported");
/* upload vertices */
VertexBuffer vertex_buffer = create_vertex_buffer();
specify_vertex_buffer(vertex_buffer, vertices, arrlenu(vertices) * sizeof (struct ModelVertex));
item->value.vertices = vertex_buffer;
/* collect texture usages into index ranges */
/* TODO: force repeating texture upload before its used in drawing */
for (size_t t = 0; t < arrlenu(indices); ++t) {
VertexBuffer index_buffer = create_vertex_buffer();
specify_vertex_buffer(index_buffer, indices[t], arrlenu(indices[t]) * sizeof (uint32_t));
arrpush(item->value.indices, index_buffer);
/* build command */
DeferredCommandDraw command = {0};
command.vertices = (AttributeArrayPointer) {
.arity = 3,
.type = TWN_FLOAT,
.stride = sizeof (struct ModelVertex),
.offset = offsetof (struct ModelVertex, position),
.buffer = vertex_buffer
};
command.texcoords = (AttributeArrayPointer) {
.arity = 2,
.type = TWN_FLOAT,
.stride = sizeof (struct ModelVertex),
.offset = offsetof (struct ModelVertex, uv),
.buffer = vertex_buffer
};
TextureKey const texture_key = textures_get_key(&ctx.texture_cache, mesh->textures[t].name);
command.textured = true;
command.texture_key = texture_key;
command.texture_repeat = true;
command.element_buffer = index_buffer;
command.element_count = (uint32_t)(arrlenu(indices[t]));
command.range_end = (uint32_t)(arrlenu(indices[t]));
/* TODO: support alpha blended case? */
TextureMode mode = textures_get_mode(&ctx.texture_cache, texture_key);
if (mode == TEXTURE_MODE_GHOSTLY)
mode = TEXTURE_MODE_SEETHROUGH;
command.texture_mode = mode;
command.pipeline = PIPELINE_SPACE;
command.depth_range_high = depth_range_high;
command.depth_range_low = depth_range_low;
DeferredCommand final_command = {
.type = DEFERRED_COMMAND_TYPE_DRAW,
.draw = command
};
arrpush(item->value.commands, final_command);
arrfree(indices[t]);
}
arrfree(vertices);
arrfree(indices);
/* TODO: sort ranges by length, on the assumption that bigger mesh parts occlude more */
}
}
/* adjust uvs into atlases when needed */
void models_update_post_textures(void) {
SDL_assert(!ctx.texture_cache.is_dirty);
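/* note: the actual remapping of uvs into atlas space is not implemented yet; this */
/* only asserts the atlas has settled and drops the queue (leaking the parsed meshes) */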
arrsetlen(model_load_queue, 0);
}
void finally_draw_models(void) {
for (int i = 0; i < arrlen(model_draw_commands); ++i) {
struct ModelDrawCommand *const command = &model_draw_commands[i];
struct ModelCacheItem *const cache = shgetp(model_cache, command->model);
for (int c = 0; c < arrlen(cache->value.commands); ++c) {
arrpush(deferred_commands, cache->value.commands[c]);
}
}
arrsetlen(model_draw_commands, 0);
}
/* drop model caches */
void free_model_cache(void) {
for (size_t i = 0; i < shlenu(model_cache); ++i) {
// fast_obj_destroy(model_cache[i].value.mesh);
SDL_free(model_cache[i].key);
}
shfree(model_cache);
}
void models_state_init(void) {
if (model_load_initialized)
return;
model_load_mutex = SDL_CreateMutex();
model_load_initialized = true;
}
void models_state_deinit(void) {
if (!model_load_initialized)
return;
free_model_cache();
arrfree(model_load_queue);
SDL_DestroyMutex(model_load_mutex);
model_load_initialized = false;
}
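/* a minimal sketch of the intended lifecycle, stitched from the other hunks in */
/* this commit (render() is assumed to issue finally_draw_models() while flushing */
/* deferred commands): */
/*     models_state_init();                        // initialize() */
/*     draw_model("models/test.obj", ...);         // game tick, each frame */
/*     models_update_pre_textures();               // render(), before atlas update */
/*     textures_update_atlas(&ctx.texture_cache);  // render() */
/*     models_update_post_textures();              // render(), after atlas update */
/*     models_state_deinit();                      // clean_up() */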


@ -22,4 +22,4 @@
#include "rendering/twn_quads.c"
#include "rendering/twn_triangles.c"
#include "rendering/twn_billboards.c"
#include "rendering/twn_model.c"
#include "rendering/twn_models.c"


@ -685,6 +685,7 @@ static bool initialize(void) {
profile_start("texture and text cache initialization");
textures_cache_init(&ctx.texture_cache, ctx.window);
text_cache_init(&ctx.text_cache);
models_state_init();
profile_end("texture and text cache initialization");
return true;
@ -706,7 +707,7 @@ static void clean_up(void) {
toml_free(ctx.config_table);
PHYSFS_deinit();
workers_deinit();
model_state_deinit();
models_state_deinit();
SDL_free(ctx.base_dir);
SDL_free(ctx.title);
SDL_GL_DeleteContext(ctx.gl_context);


@ -1,5 +1,7 @@
/* single compilation unit for every stb implementation */
#include <stdint.h>
#define STB_DS_IMPLEMENTATION
#define STBDS_ASSERT SDL_assert
#define STBDS_REALLOC(context,ptr,size) ((void)(context), SDL_realloc(ptr, size))


@ -191,6 +191,7 @@ static SDL_Surface *create_surface(int width, int height) {
/* adds a new, blank atlas surface to the cache */
static void add_new_atlas(TextureCache *cache) {
/* TODO: create a PBO surface if possible, reducing duplication */
SDL_Surface *new_atlas = create_surface((int)ctx.texture_atlas_size, (int)ctx.texture_atlas_size);
arrput(cache->atlas_surfaces, new_atlas);
arrput(cache->atlas_textures, create_gpu_texture(TEXTURE_FILTER_NEAREAST, true, 4, (int)ctx.texture_atlas_size, (int)ctx.texture_atlas_size));
@ -630,6 +631,7 @@ void textures_bind(const TextureCache *cache, TextureKey key) {
/* TODO: alternative schemes, such as: array texture, fragment shader and geometry division */
/* TODO: a way to trigger upload before it's used */
void textures_bind_repeating(const TextureCache *cache, TextureKey key) {
if (m_texture_key_is_valid(key)) {
if (cache->hash[key.id].value.loner_texture == 0) {
@ -650,6 +652,7 @@ void textures_bind_repeating(const TextureCache *cache, TextureKey key) {
SDL_LockSurface(texture.data);
/* TODO: optional glCopyImageSubData support, abstracted as copy_gpu_texture() */
upload_gpu_texture(repeating_texture,
texture.data->pixels,
texture.data->format->BytesPerPixel,


@ -28,7 +28,7 @@ static int worker_thread(void *udata) {
continue;
/* process models, which will trigger texture loads */
if (model_load_workers_thread())
if (models_load_workers_thread())
continue;
if (textures_load_workers_thread())