/* townengine/src/twn_textures.c */
#include "twn_textures_c.h"
#include "twn_config.h"
#include "twn_util.h"
#include "twn_engine_context_c.h"
#include <SDL2/SDL.h>
#include <physfs.h>
#include <physfsrwops.h>
#include <stb_ds.h>
#include <stb_rect_pack.h>
#define STB_IMAGE_IMPLEMENTATION
#include <stb_image.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
static SDL_Surface *image_to_surface(const char *path) {
2024-07-08 00:44:20 +00:00
SDL_RWops *handle = PHYSFSRWOPS_openRead(path);
if (handle == NULL)
2024-08-27 11:45:26 +00:00
goto ERR_CANNOT_OPEN_FILE;
2024-07-08 00:44:20 +00:00
2024-08-27 11:45:26 +00:00
/* TODO: try using callbacks so that less memory is used */
Sint64 file_size = handle->size(handle);
SDL_assert_always(file_size != -1);
void *file_mem = malloc(file_size);
size_t read = handle->read(handle, file_mem, 1, file_size);
SDL_assert_always(read == (size_t)file_size);
SDL_FreeRW(handle);
2024-07-08 00:44:20 +00:00
2024-08-27 11:45:26 +00:00
if (!file_mem)
goto ERR_CANNOT_ALLOCATE_MEM;
int width, height, channels;
void *image_mem = stbi_load_from_memory(file_mem, (int)file_size, &width, &height, &channels, 4);
if (!image_mem)
goto ERR_CANNOT_READ_IMAGE;
free(file_mem);
Uint32 rmask, gmask, bmask, amask;
#if SDL_BYTEORDER == SDL_BIG_ENDIAN
rmask = 0xff000000;
gmask = 0x00ff0000;
bmask = 0x0000ff00;
amask = 0x000000ff;
#else
rmask = 0x000000ff;
gmask = 0x0000ff00;
bmask = 0x00ff0000;
amask = 0xff000000;
#endif
SDL_Surface* surface = SDL_CreateRGBSurfaceFrom(image_mem, width, height,
channels * 8,
width * channels,
rmask, gmask, bmask, amask);
if (surface == NULL)
goto ERR_CANNOT_CREATE_SURFACE;
SDL_SetSurfaceBlendMode(surface, SDL_BLENDMODE_NONE);
SDL_SetSurfaceRLE(surface, true);
return surface;
ERR_CANNOT_CREATE_SURFACE:
stbi_image_free(image_mem);
ERR_CANNOT_READ_IMAGE:
free(file_mem);
ERR_CANNOT_ALLOCATE_MEM:
SDL_FreeRW(handle);
2024-07-08 00:44:20 +00:00
2024-08-27 11:45:26 +00:00
ERR_CANNOT_OPEN_FILE:
2024-07-08 00:44:20 +00:00
CRY(path, "Failed to load image. Aborting...");
die_abruptly();
}
2024-07-31 21:23:32 +00:00
static SDL_Surface *create_surface(int width, int height) {
Uint32 rmask, gmask, bmask, amask;
#if SDL_BYTEORDER == SDL_BIG_ENDIAN
rmask = 0xff000000;
gmask = 0x00ff0000;
bmask = 0x0000ff00;
amask = 0x000000ff;
#else
rmask = 0x000000ff;
gmask = 0x0000ff00;
bmask = 0x00ff0000;
amask = 0xff000000;
#endif
2024-07-31 21:23:32 +00:00
SDL_Surface *surface = SDL_CreateRGBSurface(0,
width,
height,
TEXTURE_ATLAS_BIT_DEPTH,
rmask,
gmask,
bmask,
amask);
SDL_SetSurfaceBlendMode(surface, SDL_BLENDMODE_NONE);
SDL_SetSurfaceRLE(surface, true);
return surface;
}
/* appends a fresh, blank atlas surface and a matching gpu texture to the cache */
static void add_new_atlas(struct texture_cache *cache) {
    SDL_Surface *blank_page = create_surface(TEXTURE_ATLAS_SIZE, TEXTURE_ATLAS_SIZE);
    arrput(cache->atlas_surfaces, blank_page);
    arrput(cache->atlas_textures, create_gpu_texture(TEXTURE_FILTER_NEAREAST, true));
}
/* pushes the pixel data of `surface` to the gpu texture `texture` */
/* (uploads 4 channels; assumes the surface holds RGBA data) */
static void upload_texture_from_surface(gpu_texture texture, SDL_Surface *surface) {
    /* pixels must be locked while we read them */
    SDL_LockSurface(surface);
    upload_gpu_texture(texture, surface->pixels, 4, surface->w, surface->h);
    SDL_UnlockSurface(surface);
}
static void recreate_current_atlas_texture(struct texture_cache *cache) {
/* TODO: should surfaces be freed after they cannot be referenced in atlas builing? */
/* for example, if full page of 64x64 tiles was already filled, there's no real reason to process them further */
2024-07-08 00:44:20 +00:00
SDL_Surface *atlas_surface = cache->atlas_surfaces[cache->atlas_index];
/* clear */
SDL_FillRect(atlas_surface, NULL, 0);
/* blit the texture surfaces onto the atlas */
for (size_t i = 0; i < shlenu(cache->hash); ++i) {
/* skip all that aren't part of currently built one */
2024-07-08 00:44:20 +00:00
if (cache->hash[i].value.atlas_index != cache->atlas_index)
continue;
/* skip loners */
if (cache->hash[i].value.loner_texture != 0)
continue;
2024-07-08 00:44:20 +00:00
SDL_BlitSurface(cache->hash[i].value.data,
NULL,
atlas_surface,
&(SDL_Rect){
2024-07-30 15:09:21 +00:00
.x = (int)cache->hash[i].value.srcrect.x,
.y = (int)cache->hash[i].value.srcrect.y,
.w = (int)cache->hash[i].value.srcrect.w,
.h = (int)cache->hash[i].value.srcrect.h,
});
2024-07-08 00:44:20 +00:00
}
/* texturize it! */
upload_texture_from_surface(cache->atlas_textures[cache->atlas_index], atlas_surface);
2024-07-08 00:44:20 +00:00
}
/* uses the textures currently in the cache to create an array of stbrp_rects */
static stbrp_rect *create_rects_from_cache(struct texture_cache *cache) {
stbrp_rect *rects = NULL;
for (size_t i = 0; i < shlenu(cache->hash); ++i) {
if (cache->hash[i].value.loner_texture != 0)
continue;
const SDL_Surface *surface_data = cache->hash[i].value.data;
2024-07-08 00:44:20 +00:00
stbrp_rect new_rect = {
.w = surface_data->w,
.h = surface_data->h,
};
arrput(rects, new_rect);
}
return rects;
}
/* returns an array which contains a _copy_ of every unpacked rect in rects. */
/* each copy remembers its original index into `rects` in the `id` field, */
/* so the original array can be updated once packing has run */
static stbrp_rect *filter_unpacked_rects(stbrp_rect *rects) {
    stbrp_rect *pending = NULL;

    for (size_t i = 0; i < arrlenu(rects); ++i) {
        /* already packed: nothing to do */
        if (rects[i].was_packed)
            continue;

        /* stb_rect_pack mercifully gives you a free userdata int */
        stbrp_rect copy = rects[i];
        copy.id = (int)i;
        arrput(pending, copy);
    }

    return pending;
}
/* updates the original rects array with the data from packed_rects, and */
/* marks the matching cache entries as living on the current atlas page. */
/* returns true if all rects were packed successfully */
static bool update_rects(struct texture_cache *cache, stbrp_rect *rects, stbrp_rect *packed_rects) {
    /* !!! do not grow either of the arrays !!! */
    /* the reallocation will try to reassign the array pointer, to no effect. */
    /* see stb_ds.h */
    bool packed_all = true;
    for (size_t i = 0; i < arrlenu(packed_rects); ++i) {
        /* we can check if any rects failed to be packed right here */
        /* it's not ideal, but it avoids another iteration */
        if (!packed_rects[i].was_packed) {
            packed_all = false;
            continue;
        }
        rects[packed_rects[i].id] = packed_rects[i];

        /* `id` indexes `rects`, which only holds non-loner entries, while the */
        /* hash may also contain loners — so the two index spaces drift apart */
        /* as soon as a loner exists. walk the hash, skipping loners, to find */
        /* the entry this rect actually belongs to. */
        size_t rect_index = 0;
        for (size_t h = 0; h < shlenu(cache->hash); ++h) {
            if (cache->hash[h].value.loner_texture != 0)
                continue;
            if (rect_index == (size_t)packed_rects[i].id) {
                cache->hash[h].value.atlas_index = cache->atlas_index;
                break;
            }
            ++rect_index;
        }
    }
    return packed_all;
}
/* updates the atlas location (srcrect) of every atlas-packed texture in the cache */
static void update_texture_rects_in_atlas(struct texture_cache *cache, stbrp_rect *rects) {
    size_t r = 0;

    /* rects[] holds one entry per NON-LONER hash element, in hash order. */
    /* walk the hash and consume rects in lockstep, skipping loners; the old */
    /* code iterated up to arrlenu(rects) while indexing the hash with the */
    /* same counter, which drifts once any loner texture exists. */
    for (size_t i = 0; i < shlenu(cache->hash) && r < arrlenu(rects); ++i) {
        if (cache->hash[i].value.loner_texture != 0)
            continue;

        cache->hash[i].value.srcrect = (t_frect) {
            .x = (float)rects[r].x,
            .y = (float)rects[r].y,
            .w = (float)rects[r].w,
            .h = (float)rects[r].h,
        };
        r++;
    }
}
/* initializes an empty texture cache with one blank atlas page */
void textures_cache_init(struct texture_cache *cache, SDL_Window *window) {
    cache->window = window;
    /* string-keyed hash whose keys live in an arena owned by the map */
    sh_new_arena(cache->hash);
    /* scratch nodes for stb_rect_pack, sized to the atlas width as it recommends */
    cache->node_buffer = cmalloc(sizeof *cache->node_buffer * TEXTURE_ATLAS_SIZE);
    add_new_atlas(cache);
    recreate_current_atlas_texture(cache);
}
/* releases every resource owned by the cache: atlas gpu textures and */
/* surfaces, per-entry gpu textures, stb_image pixel buffers, and the hash */
void textures_cache_deinit(struct texture_cache *cache) {
    /* free atlas textures */
    for (size_t i = 0; i < arrlenu(cache->atlas_textures); ++i) {
        delete_gpu_texture(cache->atlas_textures[i]);
    }
    arrfree(cache->atlas_textures);

    /* free atlas surfaces */
    for (size_t i = 0; i < arrlenu(cache->atlas_surfaces); ++i) {
        SDL_FreeSurface(cache->atlas_surfaces[i]);
    }
    arrfree(cache->atlas_surfaces);

    /* free cache entries */
    for (size_t i = 0; i < shlenu(cache->hash); ++i) {
        /* loner and repeating gpu textures were previously leaked here */
        if (cache->hash[i].value.loner_texture != 0)
            delete_gpu_texture(cache->hash[i].value.loner_texture);
        if (cache->hash[i].value.repeating_texture != 0)
            delete_gpu_texture(cache->hash[i].value.repeating_texture);
        /* pixel memory was allocated by stb_image, not SDL: free it separately */
        stbi_image_free(cache->hash[i].value.data->pixels);
        SDL_FreeSurface(cache->hash[i].value.data);
    }
    shfree(cache->hash);

    free(cache->node_buffer);
}
/* writes every atlas page to /dump/atlasN.png (saving itself is still a stub) */
void textures_dump_atlases(struct texture_cache *cache) {
    PHYSFS_mkdir("/dump");

    /* %zu: `i` is a size_t (%zd expects a signed argument — undefined behavior) */
    const char string_template[] = "/dump/atlas%zu.png";
    char buf[2048]; /* larger than will ever be necessary */

    for (size_t i = 0; i < arrlenu(cache->atlas_surfaces); ++i) {
        snprintf(buf, sizeof buf, string_template, i);

        SDL_RWops *handle = PHYSFSRWOPS_openWrite(buf);
        if (handle == NULL) {
            CRY("Texture atlas dump failed.", "File could not be opened");
            return;
        }

        /* TODO: */
        // IMG_SavePNG_RW(cache->atlas_surfaces[i], handle, true);
        CRY("Unimplemented", "textures_dump_atlases dumping is not there, sorry");

        /* the handle was previously leaked on every iteration */
        SDL_RWclose(handle);

        log_info("Dumped atlas %s", buf);
    }
}
/* classifies a surface by its alpha usage: OPAQUE (no transparent pixels or */
/* no alpha channel), SEETHROUGH (only fully-transparent pixels), or GHOSTLY */
/* (any partial alpha) */
static enum texture_mode infer_texture_mode(SDL_Surface *surface) {
    const uint32_t amask = surface->format->Amask;

    /* no alpha channel at all */
    if (amask == 0)
        return TEXTURE_MODE_OPAQUE;

    enum texture_mode result = TEXTURE_MODE_OPAQUE;

    SDL_LockSurface(surface);
    /* TODO: don't assume 32 bit depth ? */
    /* rows may carry padding: step by pitch instead of treating the pixels */
    /* as a tightly packed w*h array (the old loop read padding bytes) */
    for (int y = 0; y < surface->h; ++y) {
        const uint32_t *row =
            (const uint32_t *)((const uint8_t *)surface->pixels + (size_t)y * surface->pitch);
        for (int x = 0; x < surface->w; ++x) {
            t_color color;
            SDL_GetRGBA(row[x], surface->format, &color.r, &color.g, &color.b, &color.a);
            if (color.a == 0)
                result = TEXTURE_MODE_SEETHROUGH;
            else if (color.a != 255) {
                /* partial alpha dominates; nothing further can change the verdict */
                result = TEXTURE_MODE_GHOSTLY;
                goto done;
            }
        }
    }
done:
    SDL_UnlockSurface(surface);

    return result;
}
static t_texture_key textures_load(struct texture_cache *cache, const char *path) {
2024-07-08 00:44:20 +00:00
/* no need to do anything if it was loaded already */
const ptrdiff_t i = shgeti(cache->hash, path);
if (i >= 0)
return (t_texture_key){ (uint16_t)i };
2024-07-08 00:44:20 +00:00
SDL_Surface *surface = image_to_surface(path);
struct texture new_texture = {
.data = surface,
.mode = infer_texture_mode(surface),
};
2024-07-08 00:44:20 +00:00
/* it's a "loner texture," it doesn't fit in an atlas so it's not in one */
if (surface->w >= TEXTURE_ATLAS_SIZE || surface->h >= TEXTURE_ATLAS_SIZE) {
if (ctx.game.debug) {
2024-09-23 06:50:01 +00:00
if (surface->w > 2048 || surface->h > 2048)
log_warn("Unportable texture dimensions for %s, use 2048x2048 at max", path);
if (!is_power_of_two(surface->w) || !is_power_of_two(surface->h))
log_warn("Unportable texture dimensions for %s, should be powers of 2", path);
}
new_texture.loner_texture = create_gpu_texture(TEXTURE_FILTER_NEAREAST, true);
upload_texture_from_surface(new_texture.loner_texture, surface);
2024-07-30 15:09:21 +00:00
new_texture.srcrect = (t_frect) { .w = (float)surface->w, .h = (float)surface->h };
2024-07-08 00:44:20 +00:00
} else {
2024-09-23 06:50:01 +00:00
/* will be fully populated as the atlas updates */
2024-07-08 00:44:20 +00:00
new_texture.atlas_index = cache->atlas_index;
cache->is_dirty = true;
}
2024-09-23 06:50:01 +00:00
shput(cache->hash, path, new_texture);
return (t_texture_key){ (uint16_t)shgeti(cache->hash, path) };
2024-07-08 00:44:20 +00:00
}
void textures_update_atlas(struct texture_cache *cache) {
if (!cache->is_dirty)
return;
2024-07-08 00:44:20 +00:00
/* this function makes a lot more sense if you read stb_rect_pack.h */
stbrp_context pack_ctx; /* target info */
stbrp_init_target(&pack_ctx,
TEXTURE_ATLAS_SIZE,
TEXTURE_ATLAS_SIZE,
cache->node_buffer,
TEXTURE_ATLAS_SIZE);
stbrp_rect *rects = create_rects_from_cache(cache);
/* we have to keep packing, and creating atlases if necessary, */
/* until all rects have been packed. */
/* ideally, this will not iterate more than once. */
bool textures_remaining = true;
while (textures_remaining) {
stbrp_rect *rects_to_pack = filter_unpacked_rects(rects);
stbrp_pack_rects(&pack_ctx, rects_to_pack, (int)arrlen(rects_to_pack));
textures_remaining = !update_rects(cache, rects, rects_to_pack);
arrfree(rects_to_pack); /* got what we needed */
/* some textures couldn't be packed */
if (textures_remaining) {
update_texture_rects_in_atlas(cache, rects);
recreate_current_atlas_texture(cache);
/* need a new atlas for next time */
add_new_atlas(cache);
++cache->atlas_index;
}
};
update_texture_rects_in_atlas(cache, rects);
recreate_current_atlas_texture(cache);
cache->is_dirty = false;
2024-07-08 00:44:20 +00:00
arrfree(rects);
}
/* EXPERIMANTAL: LIKELY TO BE REMOVED! */
#if defined(__linux__) && !defined(HOT_RELOAD_SUPPORT) /* use rodata elf section for fast lookups of repeating textures */
2024-07-08 00:44:20 +00:00
#include "system/linux/twn_elf.h"
static const char *rodata_start;
static const char *rodata_stop;
2024-08-21 15:00:27 +00:00
static const char *last_path = NULL;
static t_texture_key last_texture;
static struct ptr_to_texture {
const void *key;
t_texture_key value;
} *ptr_to_texture;
2024-07-31 21:23:32 +00:00
/* TODO: separate and reuse */
/* resolves a path to a texture key, using two fast paths on top of the */
/* loader: an identical-pointer cache for .rodata string literals, and a */
/* pointer-keyed hash map */
t_texture_key textures_get_key(struct texture_cache *cache, const char *path) {
    /* lazily infer the .rodata bounds used for the literal-pointer fast path */
    if (rodata_stop == NULL)
        if (!infer_elf_section_bounds(".rodata", &rodata_start, &rodata_stop))
            CRY("Section inference", ".rodata section lookup failed");

    /* the fastest path: exact same string literal as last call */
    if (path == last_path)
        return last_texture;

    /* moderately fast path, by pointer hashing */
    const ptrdiff_t found = hmgeti(ptr_to_texture, path);
    if (found != -1) {
        if (path >= rodata_start && path < rodata_stop)
            last_path = path;
        last_texture = ptr_to_texture[found].value;
        return last_texture;
    }

    /* try loading */
    last_texture = textures_load(cache, path);
    hmput(ptr_to_texture, path, last_texture);
    if (path >= rodata_start && path < rodata_stop)
        last_path = path;
    return last_texture;
}
#else
t_texture_key textures_get_key(struct texture_cache *cache, const char *path) {
/* hash tables are assumed to be stable, so we just return indices */
const ptrdiff_t texture = shgeti(cache->hash, path);
2024-07-08 00:44:20 +00:00
/* load it if it isn't */
if (texture == -1) {
return textures_load(cache, path);
} else
return (t_texture_key){ (uint16_t)texture };
}
2024-07-08 00:44:20 +00:00
#endif /* generic implementation of textures_get_key() */
2024-07-08 00:44:20 +00:00
/* returns the atlas page index for a packed texture, or the negated gpu */
/* texture id for a loner; 0 (and a CRY) on an invalid key */
int32_t textures_get_atlas_id(const struct texture_cache *cache, t_texture_key key) {
    if (!m_texture_key_is_valid(key)) {
        CRY("Texture lookup failed.",
            "Tried to get atlas id that isn't loaded.");
        return 0;
    }

    if (cache->hash[key.id].value.loner_texture != 0)
        return -cache->hash[key.id].value.loner_texture;

    return cache->hash[key.id].value.atlas_index;
}
2024-07-30 15:09:21 +00:00
t_frect textures_get_srcrect(const struct texture_cache *cache, t_texture_key key) {
if (m_texture_key_is_valid(key)) {
return cache->hash[key.id].value.srcrect;
} else {
CRY("Texture lookup failed.",
"Tried to get texture that isn't loaded.");
2024-07-30 15:09:21 +00:00
return (t_frect){ 0, 0, 0, 0 };
2024-07-08 00:44:20 +00:00
}
}
2024-07-30 15:09:21 +00:00
t_frect textures_get_dims(const struct texture_cache *cache, t_texture_key key) {
if (m_texture_key_is_valid(key)) {
if (cache->hash[key.id].value.loner_texture != 0)
return cache->hash[key.id].value.srcrect;
else
2024-07-30 15:09:21 +00:00
return (t_frect){ .w = TEXTURE_ATLAS_SIZE, .h = TEXTURE_ATLAS_SIZE };
} else {
CRY("Texture lookup failed.",
"Tried to get texture that isn't loaded.");
2024-07-30 15:09:21 +00:00
return (t_frect){ 0, 0, 0, 0 };
}
2024-07-08 00:44:20 +00:00
}
/* binds the gpu texture backing `key`: the atlas page for packed textures, */
/* or the loner's own texture */
void textures_bind(const struct texture_cache *cache, t_texture_key key) {
    if (m_texture_key_is_valid(key)) {
        if (cache->hash[key.id].value.loner_texture != 0)
            bind_gpu_texture(cache->hash[key.id].value.loner_texture);
        else
            bind_gpu_texture(cache->atlas_textures[cache->hash[key.id].value.atlas_index]);
    } else if (key.id == 0) {
        CRY("Texture binding failed.",
            "Tried to get texture that isn't loaded.");
    }
}
2024-07-31 21:23:32 +00:00
/* TODO: alternative schemes, such as: array texture, fragment shader and geometry division */
void textures_bind_repeating(const struct texture_cache *cache, t_texture_key key) {
2024-07-31 21:23:32 +00:00
if (m_texture_key_is_valid(key)) {
if (cache->hash[key.id].value.loner_texture == 0) {
/* already allocated */
if (cache->hash[key.id].value.repeating_texture != 0) {
bind_gpu_texture(cache->hash[key.id].value.repeating_texture);
2024-07-31 21:23:32 +00:00
return;
}
const struct texture texture = cache->hash[key.id].value;
const gpu_texture repeating_texture = create_gpu_texture(TEXTURE_FILTER_NEAREAST, false);
2024-07-31 21:23:32 +00:00
SDL_LockSurface(texture.data);
upload_gpu_texture(repeating_texture,
texture.data->pixels,
4,
texture.data->w,
texture.data->h);
2024-07-31 21:23:32 +00:00
SDL_UnlockSurface(texture.data);
cache->hash[key.id].value.repeating_texture = repeating_texture;
2024-09-23 06:50:01 +00:00
bind_gpu_texture(repeating_texture);
2024-07-31 21:23:32 +00:00
} else
bind_gpu_texture(cache->hash[key.id].value.loner_texture);
2024-07-31 21:23:32 +00:00
} else if (key.id == 0) {
CRY("Texture binding failed.",
"Tried to get texture that isn't loaded.");
}
}
/* returns the alpha classification of a texture; GHOSTLY on an invalid key */
enum texture_mode textures_get_mode(const struct texture_cache *cache, t_texture_key key) {
    if (m_texture_key_is_valid(key)) {
        return cache->hash[key.id].value.mode;
    } else {
        /* this is a lookup, not a bind; message now matches the other getters */
        CRY("Texture lookup failed.",
            "Tried to get texture that isn't loaded.");
        return TEXTURE_MODE_GHOSTLY;
    }
}
size_t textures_get_num_atlases(const struct texture_cache *cache) {
2024-07-08 00:44:20 +00:00
return cache->atlas_index + 1;
}
2024-08-21 15:00:27 +00:00
void textures_reset_state(void) {
#if defined(__linux__) && !defined(HOT_RELOAD_SUPPORT)
2024-08-21 15:00:27 +00:00
last_path = NULL;
last_texture = (t_texture_key){0};
shfree(ptr_to_texture);
#endif
}