/*
 * physical memory allocator, intended to be used to allocate
 * memory for user processes. allocates in 4096-byte "pages".
 * free list is sorted and combines adjacent pages into
 * long runs, to make it easier to allocate big segments.
 * one reason the page size is 4k is that the x86 segment size
 * granularity is 4k.
 */

#include "param.h"
#include "types.h"
#include "defs.h"
#include "param.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"

struct spinlock kalloc_lock;

// A run describes one contiguous range of free physical memory.
// The header lives inside the free memory itself: kfree() treats
// the first bytes of each freed range as a struct run.
struct run {
  struct run *next; // next run, at a higher address
  int len; // bytes
};
struct run *freelist; // runs kept sorted by ascending address

/*
 * initialize free list of physical pages. this code
 * cheats by just considering the one megabyte of pages
 * after _end.
 */
void
rsc's avatar
rsc committed
kinit(void)
rtm's avatar
rtm committed
{
  extern int end;
rtm's avatar
rtm committed
  char *start;
  
  initlock(&kalloc_lock, "kalloc");
rtm's avatar
rtm committed
  start = (char *) &end;
  start = (char *) (((uint)start + PAGE) & ~(PAGE-1));
rtm's avatar
rtm committed
  mem = 256; // assume 256 pages of RAM
rtm's avatar
rtm committed
  cprintf("mem = %d\n", mem * PAGE);
  kfree(start, mem * PAGE);
}

void
kfree(char *cp, int len)
{
  struct run **rr;
  struct run *p = (struct run *) cp;
  struct run *pend = (struct run *) (cp + len);
rtm's avatar
rtm committed
  int i;
rtm's avatar
rtm committed

  if(len % PAGE)
    panic("kfree");

rtm's avatar
rtm committed
  // XXX fill with junk to help debug
  for(i = 0; i < len; i++)
    cp[i] = 1;

rtm's avatar
rtm committed
  acquire(&kalloc_lock);

rtm's avatar
rtm committed
  rr = &freelist;
  while(*rr){
    struct run *rend = (struct run *) ((char *)(*rr) + (*rr)->len);
    if(p >= *rr && p < rend)
      panic("freeing free page");
    if(pend == *rr){
      p->len = len + (*rr)->len;
      p->next = (*rr)->next;
      *rr = p;
rtm's avatar
rtm committed
      goto out;
rtm's avatar
rtm committed
    }
    if(pend < *rr){
      p->len = len;
      p->next = *rr;
      *rr = p;
rtm's avatar
rtm committed
      goto out;
rtm's avatar
rtm committed
    }
    if(p == rend){
      (*rr)->len += len;
      if((*rr)->next && (*rr)->next == pend){
        (*rr)->len += (*rr)->next->len;
        (*rr)->next = (*rr)->next->next;
      }
rtm's avatar
rtm committed
      goto out;
rtm's avatar
rtm committed
    }
    rr = &((*rr)->next);
  }
  p->len = len;
  p->next = 0;
  *rr = p;
rtm's avatar
rtm committed

 out:
  release(&kalloc_lock);
rtm's avatar
rtm committed
}

/*
 * allocate n bytes of physical memory.
 * returns a kernel-segment pointer.
 * returns 0 if there's no run that's big enough.
 */
rsc's avatar
rsc committed
char*
rtm's avatar
rtm committed
kalloc(int n)
{
  struct run **rr;

  if(n % PAGE)
    panic("kalloc");

rtm's avatar
rtm committed
  acquire(&kalloc_lock);

rtm's avatar
rtm committed
  rr = &freelist;
  while(*rr){
    struct run *r = *rr;
    if(r->len == n){
      *rr = r->next;
rtm's avatar
rtm committed
      release(&kalloc_lock);
rtm's avatar
rtm committed
      return (char *) r;
    }
    if(r->len > n){
      char *p = (char *)r + (r->len - n);
      r->len -= n;
rtm's avatar
rtm committed
      release(&kalloc_lock);
rtm's avatar
rtm committed
      return p;
    }
    rr = &(*rr)->next;
  }
rtm's avatar
rtm committed
  release(&kalloc_lock);
rtm's avatar
rtm committed
  cprintf("kalloc: out of memory\n");
rtm's avatar
rtm committed
  return 0;
}