#include "lowmem.h"
#include <cache_struct.h>
#include <i386.h>
#include <misc.h>
#include <assert.h>
#include <util.h>
/*
 * Per-slab bookkeeping, stored at the very beginning of each slab.
 * Free buffers form an intrusive singly linked list threaded through the
 * first word of each free buffer (see next_buf below).
 */
struct slab_header_struct {
void *next; /* next slab in the owning cache's slab list, NULL at tail */
unsigned short count; /* number of objects currently allocated from this slab */
void *first_free; /* head of the intrusive free-buffer list, NULL when full */
};
/*
 * Total byte size of a slab of order k (2^k page frames).
 * NOTE(review): assumes exp2() is an integer power-of-two helper from one of
 * the project headers; if this resolved to math.h's exp2() the arithmetic
 * would go through double — confirm.
 */
#define get_slab_size(k) (exp2(k) * PAGE_SIZE)
/* True when no objects are currently allocated from the slab. */
#define is_slab_empty(slab) (!((struct slab_header_struct *)(slab))->count)
/* Lvalue for the slab's link in the cache's slab list. */
#define next_slab(slab) (((struct slab_header_struct *)(slab))->next)
/* Lvalue for a free buffer's link: stored in the buffer's own first word. */
#define next_buf(buf) (*(void **)(buf))
/*
 * Page-frame order (log2 of the number of pages) used for slabs that hold
 * obj_size-byte objects.
 *
 * NOTE(review): the nested ulog2(ulog2(ulog2(obj_size))) heuristic collapses
 * to a very small value for any realistic obj_size — confirm this is the
 * intended formula and not a typo for a single ulog2.
 * NOTE(review): the fit term uses truncating division, so for obj_size just
 * above a multiple of PAGE_SIZE the chosen slab may be too small to hold the
 * header plus one object; init_slab()'s n >= 1 assert would then fire.
 */
static inline unsigned int get_slab_order(unsigned long obj_size)
{
	unsigned int heuristic_order = ulog2(ulog2(ulog2(obj_size)));
	unsigned int fit_order =
		ulog2((obj_size + sizeof (struct slab_header_struct)) / PAGE_SIZE);

	return max(heuristic_order, fit_order);
}
/*
 * Return buf to the slab's free list.  The old list head is stored in the
 * buffer's own first word, then buf becomes the new head.
 */
static void push_free_buf(struct slab_header_struct *slab_header, void *buf)
{
	*(void **)buf = slab_header->first_free;
	slab_header->first_free = buf;
}
/*
 * Take one buffer off the slab's free list.
 * Returns the buffer, or NULL when the slab has no free buffers left.
 */
static void *pop_free_buf(struct slab_header_struct *slab_header)
{
	void *buf = slab_header->first_free;

	if (buf)
		slab_header->first_free = *(void **)buf;
	return buf;
}
/*
 * Initialize a freshly allocated slab of `size` bytes for objects of
 * `obj_size` bytes: write the header and build the free list.
 *
 * Objects are packed against the END of the slab, so any slack between the
 * header and the first object is left unused at the front.
 * NOTE(review): no explicit alignment handling — assumes obj_size keeps the
 * buffers suitably aligned for the embedded void * links; confirm callers
 * only pass pointer-aligned sizes.
 */
static void init_slab(void *slab, unsigned long size, unsigned long obj_size)
{
	struct slab_header_struct *header = slab;
	unsigned long nr_objs;
	void *cur, *last;

	header->next = NULL;
	header->count = 0;

	nr_objs = (size - sizeof (struct slab_header_struct)) / obj_size;
	assert(nr_objs >= 1);
	header->first_free = slab + size - nr_objs * obj_size;

	/* Chain each buffer to the next; the last one terminates the list. */
	last = slab + size - obj_size;
	for (cur = header->first_free; cur != last; cur += obj_size)
		next_buf(cur) = cur + obj_size;
	next_buf(last) = NULL;
}
/*
 * Allocate one object from the given slab.
 * Returns the object, or NULL when the slab is full.
 */
static void *slab_alloc(void *slab)
{
	struct slab_header_struct *header = slab;
	void *obj = pop_free_buf(header);

	if (obj)
		header->count++;
	return obj;
}
/*
 * Return obj to the slab it was allocated from.
 * The slab must currently have at least one live object.
 */
static void slab_free(void *slab, void *obj)
{
	struct slab_header_struct *header = slab;

	assert(header->count > 0);
	header->count--;
	push_free_buf(header, obj);
}
/*
 * Allocate and initialize a new slab for obj_size-byte objects.
 * Returns the slab, or NULL when the page-frame allocator fails.
 */
static void *alloc_slab(unsigned long obj_size)
{
	unsigned int order = get_slab_order(obj_size);
	void *slab = alloc_page_frames(order);

	if (slab)
		init_slab(slab, get_slab_size(order), obj_size);
	return slab;
}
/*
 * Release a slab's page frames.  obj_size must match the size the slab was
 * created with, since it determines the slab's order.
 */
static void free_slab(void *slab, unsigned long obj_size)
{
	free_page_frames(slab, get_slab_order(obj_size));
}
/*
 * Initialize a cache that hands out objects of `size` bytes.
 * The cache starts with no slabs; the first allocation creates one.
 */
void kmem_cache_init(struct cache_struct *cache, unsigned long size)
{
	cache->first_slab = NULL;
	cache->size = size;
}
void kmem_cache_check_empty(const struct cache_struct *cache)
{
assert(!cache->first_slab);
}
/*
 * Allocate one object from the cache.
 *
 * Walks the slab list looking for a slab with a free buffer, growing the
 * list with a fresh slab when the end is reached.  Returns NULL only when
 * the underlying page-frame allocator cannot provide a new slab.
 */
void *kmem_cache_alloc(struct cache_struct *cache)
{
	void **link = &cache->first_slab;
	void *obj;

	for (;;) {
		/* Extend the list when we fall off the end. */
		if (!*link && !(*link = alloc_slab(cache->size)))
			return NULL;
		obj = slab_alloc(*link);
		if (obj)
			return obj;
		link = &next_slab(*link);
	}
}
/*
 * Return obj to the cache.
 *
 * Finds the slab whose address range contains obj (obj MUST have come from
 * this cache — the walk asserts if the list ends first), frees the object,
 * and releases the slab entirely once its last object is gone.
 */
void kmem_cache_free(struct cache_struct *cache, void *obj)
{
	unsigned long slab_size = get_slab_size(get_slab_order(cache->size));
	void **link;

	for (link = &cache->first_slab; ; link = &next_slab(*link)) {
		void *slab = *link;

		assert(slab);
		if (obj < slab || obj >= slab + slab_size)
			continue;
		slab_free(slab, obj);
		if (is_slab_empty(slab)) {
			/* Unlink before the frames are handed back. */
			*link = next_slab(slab);
			free_slab(slab, cache->size);
		}
		return;
	}
}