View of xos/mm/slab.c


XOS | Parent Directory | View | Download

/* Slab allocator */
/* Copyright (C) 2008  Emmanuel Varoquaux
 
   This file is part of XOS.
 
   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.
 
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
 
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */
 
#include "lowmem.h"
 
#include <cache_struct.h>
#include <i386.h>
#include <misc.h>
#include <assert.h>
#include <util.h>
 
struct slab_header_struct {
  void *next;           /* slab suivant */
  unsigned short count; /* nombre d'objets alloues */
  void *first_free;     /* premier buffer libre */
};
 
#define get_slab_size(k)    (exp2(k) * PAGE_SIZE)
#define is_slab_empty(slab) (!((struct slab_header_struct *)(slab))->count)
#define next_slab(slab)     (((struct slab_header_struct *)(slab))->next)
#define next_buf(buf)       (*(void **)(buf))
 
static inline unsigned int get_slab_order(unsigned long obj_size)
{
  return max(ulog2(ulog2(ulog2(obj_size))), ulog2((obj_size + sizeof (struct slab_header_struct)) / PAGE_SIZE));
}
 
static void push_free_buf(struct slab_header_struct *slab_header, void *buf)
{
  next_buf(buf) = slab_header->first_free;
  slab_header->first_free = buf;
}
 
static void *pop_free_buf(struct slab_header_struct *slab_header)
{
  void *buf;
 
  if (!(buf = slab_header->first_free))
    return NULL;
  slab_header->first_free = next_buf(slab_header->first_free);
  return buf;
}
 
/* size doit etre superieur ou egal a obj_size + sizeof (struct slab_header_struct). */
static void init_slab(void *slab, unsigned long size, unsigned long obj_size)
{
  struct slab_header_struct *header;
  void *buf;
  unsigned long n;
 
  /* initialisation de l'en-tete */
  header = slab;
  header->next = NULL;
  header->count = 0;
  n = (size - sizeof (struct slab_header_struct)) / obj_size; /* nombre d'objets dans le slab - jamais nul */
  assert(n >= 1);
  header->first_free = slab + size - n * obj_size;
 
  /* initialisation de la liste des buffer libres */
  buf = header->first_free;
  n = 1;
  while (buf <= slab + size - 2 * obj_size) {
    next_buf(buf) = buf + obj_size;
    buf += obj_size;
    n++;
  }
  next_buf(buf) = NULL;
  assert(n == (size - sizeof (struct slab_header_struct)) / obj_size);
}
 
static void *slab_alloc(void *slab)
{
  struct slab_header_struct *header;
  void *obj;
 
  header = slab;
  if (!(obj = pop_free_buf(header)))
    return NULL;
  header->count++;
  return obj;
}
 
static void slab_free(void *slab, void *obj)
{
  struct slab_header_struct *header;
 
  header = slab;
  assert(header->count);
  push_free_buf(header, obj);
  header->count--;
}
 
/* La taille du slab allouee est fonction de obj_size. Ainsi:
 *   obj_size=8 => slab_size=2*PAGE_SIZE
 *   obj_size=32 => slab_size=4*PAGE_SIZE
 *   etc. */
static void *alloc_slab(unsigned long obj_size)
{
  unsigned int k;
  void *slab;
 
  k = get_slab_order(obj_size);
  if (!(slab = alloc_page_frames(k)))
    return NULL;
  init_slab(slab, get_slab_size(k), obj_size);
  return slab;
}
 
static void free_slab(void *slab, unsigned long obj_size)
{
  unsigned int k;
 
  k = get_slab_order(obj_size);
  free_page_frames(slab, k);
}
 
/* size doit etre superieur ou egal a sizeof (void *). */
void kmem_cache_init(struct cache_struct *cache, unsigned long size)
{
  cache->size = size;
  cache->first_slab = NULL;
}
 
void kmem_cache_check_empty(const struct cache_struct *cache)
{
  assert(!cache->first_slab);
}
 
void *kmem_cache_alloc(struct cache_struct *cache)
{
  void **slab;
  void *obj;
 
  slab = &cache->first_slab;
  while (1) {
    if (!*slab)
      if (!(*slab = alloc_slab(cache->size)))
        return NULL;
    if ((obj = slab_alloc(*slab)))
      return obj;
    slab = &next_slab(*slab);
  }
}
 
void kmem_cache_free(struct cache_struct *cache, void *obj)
{
  unsigned long slab_size;
  void **slab, *next;
 
  slab_size = get_slab_size(get_slab_order(cache->size));
  slab = &cache->first_slab;
  while (1) {
    assert(*slab);
    if (obj >= *slab && obj < *slab + slab_size) {
      slab_free(*slab, obj);
      if (is_slab_empty(*slab)) {
        next = next_slab(*slab);
        free_slab(*slab, cache->size);
        *slab = next;
      }
      return;
    }
    slab = &next_slab(*slab);
  }
}