typedef & PascalCase for ALL structs and enums
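
The diff below renames the texture-cache types to PascalCase: struct texture_cache becomes TextureCache, struct texture becomes Texture, t_frect becomes Rect, t_color becomes Color, t_texture_key becomes TextureKey, gpu_texture becomes GPUTexture, and enum texture_mode becomes TextureMode. The typedef declarations themselves live in headers that are not part of this diff; the sketch below is only an illustration of the convention, with field layouts and the GPUTexture handle type inferred from how the names are used further down.

/* Illustrative sketch only -- not part of the commit. Field layouts are
 * assumptions inferred from usage in the diff below. */
#include <stdint.h>

typedef struct TextureCache TextureCache;  /* was: struct texture_cache */
typedef struct Texture Texture;            /* was: struct texture */

typedef struct Rect {                      /* was: t_frect */
    float x, y, w, h;
} Rect;

typedef struct Color {                     /* was: t_color; SDL_GetRGBA() fills 8-bit channels */
    uint8_t r, g, b, a;
} Color;

typedef struct TextureKey {                /* was: t_texture_key; .id indexes the cache hash */
    uint16_t id;
} TextureKey;

typedef uint32_t GPUTexture;               /* was: gpu_texture; assumed to be an integer GPU handle */

typedef enum TextureMode {                 /* was: enum texture_mode; only OPAQUE appears in this diff */
    TEXTURE_MODE_OPAQUE,
} TextureMode;
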
@@ -113,14 +113,14 @@ static SDL_Surface *create_surface(int width, int height) {


 /* adds a new, blank atlas surface to the cache */
-static void add_new_atlas(struct texture_cache *cache) {
+static void add_new_atlas(TextureCache *cache) {
     SDL_Surface *new_atlas = create_surface(TEXTURE_ATLAS_SIZE, TEXTURE_ATLAS_SIZE);
     arrput(cache->atlas_surfaces, new_atlas);
     arrput(cache->atlas_textures, create_gpu_texture(TEXTURE_FILTER_NEAREAST, true));
 }


-static void upload_texture_from_surface(gpu_texture texture, SDL_Surface *surface) {
+static void upload_texture_from_surface(GPUTexture texture, SDL_Surface *surface) {
     SDL_LockSurface(surface);

     upload_gpu_texture(texture, surface->pixels, surface->format->BytesPerPixel, surface->w, surface->h);
@@ -129,7 +129,7 @@ static void upload_texture_from_surface(gpu_texture texture, SDL_Surface *surfac
 }


-static void recreate_current_atlas_texture(struct texture_cache *cache) {
+static void recreate_current_atlas_texture(TextureCache *cache) {
     /* TODO: should surfaces be freed after they cannot be referenced in atlas builing? */
     /* for example, if full page of 64x64 tiles was already filled, there's no real reason to process them further */
     SDL_Surface *atlas_surface = cache->atlas_surfaces[cache->atlas_index];
@@ -164,7 +164,7 @@ static void recreate_current_atlas_texture(struct texture_cache *cache) {


 /* uses the textures currently in the cache to create an array of stbrp_rects */
-static stbrp_rect *create_rects_from_cache(struct texture_cache *cache) {
+static stbrp_rect *create_rects_from_cache(TextureCache *cache) {
     stbrp_rect *rects = NULL;
     for (size_t i = 0; i < shlenu(cache->hash); ++i) {
         if (cache->hash[i].value.loner_texture != 0)
@@ -203,7 +203,7 @@ static stbrp_rect *filter_unpacked_rects(stbrp_rect *rects) {

 /* updates the original rects array with the data from packed_rects */
 /* returns true if all rects were packed successfully */
-static bool update_rects(struct texture_cache *cache, stbrp_rect *rects, stbrp_rect *packed_rects) {
+static bool update_rects(TextureCache *cache, stbrp_rect *rects, stbrp_rect *packed_rects) {
     /* !!! do not grow either of the arrays !!! */
     /* the reallocation will try to reassign the array pointer, to no effect. */
     /* see stb_ds.h */
@@ -229,13 +229,13 @@ static bool update_rects(struct texture_cache *cache, stbrp_rect *rects, stbrp_r


 /* updates the atlas location of every rect in the cache */
-static void update_texture_rects_in_atlas(struct texture_cache *cache, stbrp_rect *rects) {
+static void update_texture_rects_in_atlas(TextureCache *cache, stbrp_rect *rects) {
     int r = 0;
     for (size_t i = 0; i < shlenu(cache->hash); ++i) {
         if (cache->hash[i].value.loner_texture != 0)
             continue;

-        cache->hash[i].value.srcrect = (t_frect) {
+        cache->hash[i].value.srcrect = (Rect) {
             .x = (float)rects[r].x,
             .y = (float)rects[r].y,
             .w = (float)rects[r].w,
@@ -247,7 +247,7 @@ static void update_texture_rects_in_atlas(struct texture_cache *cache, stbrp_rec
 }


-void textures_cache_init(struct texture_cache *cache, SDL_Window *window) {
+void textures_cache_init(TextureCache *cache, SDL_Window *window) {
     cache->window = window;
     sh_new_arena(cache->hash);

@@ -258,7 +258,7 @@ void textures_cache_init(struct texture_cache *cache, SDL_Window *window) {
 }


-void textures_cache_deinit(struct texture_cache *cache) {
+void textures_cache_deinit(TextureCache *cache) {
     /* free atlas textures */
     for (size_t i = 0; i < arrlenu(cache->atlas_textures); ++i) {
         delete_gpu_texture(cache->atlas_textures[i]);
@@ -282,7 +282,7 @@ void textures_cache_deinit(struct texture_cache *cache) {
 }


-void textures_dump_atlases(struct texture_cache *cache) {
+void textures_dump_atlases(TextureCache *cache) {
     PHYSFS_mkdir("/dump");

     const char string_template[] = "/dump/atlas%zd.png";
@@ -307,18 +307,18 @@ void textures_dump_atlases(struct texture_cache *cache) {
 }


-static enum texture_mode infer_texture_mode(SDL_Surface *surface) {
+static enum TextureMode infer_texture_mode(SDL_Surface *surface) {
     const uint32_t amask = surface->format->Amask;
     if (amask == 0)
         return TEXTURE_MODE_OPAQUE;

-    enum texture_mode result = TEXTURE_MODE_OPAQUE;
+    enum TextureMode result = TEXTURE_MODE_OPAQUE;

     SDL_LockSurface(surface);

     for (int i = 0; i < surface->w * surface->h; ++i) {
         /* TODO: don't assume 32 bit depth ? */
-        t_color color;
+        Color color;
         SDL_GetRGBA(((uint32_t *)surface->pixels)[i], surface->format, &color.r, &color.g, &color.b, &color.a);

         if (color.a == 0)
@@ -335,14 +335,14 @@ static enum texture_mode infer_texture_mode(SDL_Surface *surface) {
 }


-static t_texture_key textures_load(struct texture_cache *cache, const char *path) {
+static TextureKey textures_load(TextureCache *cache, const char *path) {
     /* no need to do anything if it was loaded already */
     const ptrdiff_t i = shgeti(cache->hash, path);
     if (i >= 0)
-        return (t_texture_key){ (uint16_t)i };
+        return (TextureKey){ (uint16_t)i };

     SDL_Surface *surface = image_to_surface(path);
-    struct texture new_texture = {
+    Texture new_texture = {
         .data = surface,
         .mode = infer_texture_mode(surface),
     };
@@ -357,18 +357,18 @@ static t_texture_key textures_load(struct texture_cache *cache, const char *path
         }
         new_texture.loner_texture = create_gpu_texture(TEXTURE_FILTER_NEAREAST, true);
         upload_texture_from_surface(new_texture.loner_texture, surface);
-        new_texture.srcrect = (t_frect) { .w = (float)surface->w, .h = (float)surface->h };
+        new_texture.srcrect = (Rect) { .w = (float)surface->w, .h = (float)surface->h };
     } else {
         /* will be fully populated as the atlas updates */
         new_texture.atlas_index = cache->atlas_index;
         cache->is_dirty = true;
     }
     shput(cache->hash, path, new_texture);
-    return (t_texture_key){ (uint16_t)shgeti(cache->hash, path) };
+    return (TextureKey){ (uint16_t)shgeti(cache->hash, path) };
 }


-void textures_update_atlas(struct texture_cache *cache) {
+void textures_update_atlas(TextureCache *cache) {
     if (!cache->is_dirty)
         return;

@@ -421,14 +421,14 @@ static const char *rodata_start;
 static const char *rodata_stop;

 static const char *last_path = NULL;
-static t_texture_key last_texture;
-static struct ptr_to_texture {
+static TextureKey last_texture;
+static struct PtrToTexture {
     const void *key;
-    t_texture_key value;
+    TextureKey value;
 } *ptr_to_texture;

 /* TODO: separate and reuse */
-t_texture_key textures_get_key(struct texture_cache *cache, const char *path) {
+TextureKey textures_get_key(TextureCache *cache, const char *path) {
     if (rodata_stop == NULL)
         if (!infer_elf_section_bounds(".rodata", &rodata_start, &rodata_stop))
             CRY("Section inference", ".rodata section lookup failed");
@@ -458,7 +458,7 @@ t_texture_key textures_get_key(struct texture_cache *cache, const char *path) {
 }

 #else
-t_texture_key textures_get_key(struct texture_cache *cache, const char *path) {
+TextureKey textures_get_key(TextureCache *cache, const char *path) {
     /* hash tables are assumed to be stable, so we just return indices */
     const ptrdiff_t texture = shgeti(cache->hash, path);

@@ -466,12 +466,12 @@ t_texture_key textures_get_key(struct texture_cache *cache, const char *path) {
     if (texture == -1) {
         return textures_load(cache, path);
     } else
-        return (t_texture_key){ (uint16_t)texture };
+        return (TextureKey){ (uint16_t)texture };
 }

 #endif /* generic implementation of textures_get_key() */

-int32_t textures_get_atlas_id(const struct texture_cache *cache, t_texture_key key) {
+int32_t textures_get_atlas_id(const TextureCache *cache, TextureKey key) {
     if (m_texture_key_is_valid(key)) {
         if (cache->hash[key.id].value.loner_texture != 0)
             return -cache->hash[key.id].value.loner_texture;
@@ -484,32 +484,32 @@ int32_t textures_get_atlas_id(const struct texture_cache *cache, t_texture_key k
     }
 }

-t_frect textures_get_srcrect(const struct texture_cache *cache, t_texture_key key) {
+Rect textures_get_srcrect(const TextureCache *cache, TextureKey key) {
     if (m_texture_key_is_valid(key)) {
         return cache->hash[key.id].value.srcrect;
     } else {
         CRY("Texture lookup failed.",
             "Tried to get texture that isn't loaded.");
-        return (t_frect){ 0, 0, 0, 0 };
+        return (Rect){ 0, 0, 0, 0 };
     }
 }


-t_frect textures_get_dims(const struct texture_cache *cache, t_texture_key key) {
+Rect textures_get_dims(const TextureCache *cache, TextureKey key) {
     if (m_texture_key_is_valid(key)) {
         if (cache->hash[key.id].value.loner_texture != 0)
             return cache->hash[key.id].value.srcrect;
         else
-            return (t_frect){ .w = TEXTURE_ATLAS_SIZE, .h = TEXTURE_ATLAS_SIZE };
+            return (Rect){ .w = TEXTURE_ATLAS_SIZE, .h = TEXTURE_ATLAS_SIZE };
     } else {
         CRY("Texture lookup failed.",
             "Tried to get texture that isn't loaded.");
-        return (t_frect){ 0, 0, 0, 0 };
+        return (Rect){ 0, 0, 0, 0 };
     }
 }


-void textures_bind(const struct texture_cache *cache, t_texture_key key) {
+void textures_bind(const TextureCache *cache, TextureKey key) {
     if (m_texture_key_is_valid(key)) {
         if (cache->hash[key.id].value.loner_texture == 0)
             bind_gpu_texture(cache->atlas_textures[cache->hash[key.id].value.atlas_index]);
@@ -523,7 +523,7 @@ void textures_bind(const struct texture_cache *cache, t_texture_key key) {


 /* TODO: alternative schemes, such as: array texture, fragment shader and geometry division */
-void textures_bind_repeating(const struct texture_cache *cache, t_texture_key key) {
+void textures_bind_repeating(const TextureCache *cache, TextureKey key) {
     if (m_texture_key_is_valid(key)) {
         if (cache->hash[key.id].value.loner_texture == 0) {

@@ -533,9 +533,9 @@ void textures_bind_repeating(const struct texture_cache *cache, t_texture_key ke
             return;
         }

-        const struct texture texture = cache->hash[key.id].value;
+        const Texture texture = cache->hash[key.id].value;

-        const gpu_texture repeating_texture = create_gpu_texture(TEXTURE_FILTER_NEAREAST, false);
+        const GPUTexture repeating_texture = create_gpu_texture(TEXTURE_FILTER_NEAREAST, false);

         SDL_LockSurface(texture.data);

@@ -561,7 +561,7 @@ void textures_bind_repeating(const struct texture_cache *cache, t_texture_key ke
 }


-enum texture_mode textures_get_mode(const struct texture_cache *cache, t_texture_key key) {
+TextureMode textures_get_mode(const TextureCache *cache, TextureKey key) {
     if (m_texture_key_is_valid(key)) {
         return cache->hash[key.id].value.mode;
     } else {
@@ -572,14 +572,14 @@ enum texture_mode textures_get_mode(const struct texture_cache *cache, t_texture
 }


-size_t textures_get_num_atlases(const struct texture_cache *cache) {
+size_t textures_get_num_atlases(const TextureCache *cache) {
     return cache->atlas_index + 1;
 }

 void textures_reset_state(void) {
 #if defined(__linux__) && !defined(HOT_RELOAD_SUPPORT)
     last_path = NULL;
-    last_texture = (t_texture_key){0};
+    last_texture = (TextureKey){0};
     shfree(ptr_to_texture);

 #endif
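
For reference, a call site written against the renamed API might read as follows. This is a hypothetical usage sketch, not part of the commit: the window and the asset path are placeholders and error handling is omitted; only functions whose signatures appear in the diff above are used.

/* Hypothetical usage sketch -- not part of the commit. */
TextureCache cache = {0};
textures_cache_init(&cache, window);                           /* window: an SDL_Window * created elsewhere */

TextureKey key = textures_get_key(&cache, "/assets/tile.png"); /* placeholder path */
textures_update_atlas(&cache);                                 /* repack atlases if new textures were loaded */

Rect src = textures_get_srcrect(&cache, key);                  /* where the texture sits in its atlas */
textures_bind(&cache, key);                                    /* binds the atlas (or loner) GPU texture */

textures_cache_deinit(&cache);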