// Physical memory allocator, intended to allocate
// memory for user processes. Allocates in 4096-byte "pages".
// Free list is kept sorted and combines adjacent pages into
// long runs, to make it easier to allocate big segments.
// One reason the page size is 4k is that the x86 segment size
// granularity is 4k.

#include "param.h"
#include "types.h"
#include "defs.h"
#include "param.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"

// Protects freelist; kalloc() and kfree() hold this for every list walk.
struct spinlock kalloc_lock;

// A contiguous run of free physical memory.  The header lives in the
// first bytes of the free memory itself; freelist links the runs in
// ascending address order so kfree() can coalesce adjacent runs.
struct run {
  struct run *next;
  int len; // bytes
};
struct run *freelist;

// Initialize free list of physical pages.
// This code cheats by just considering one megabyte of
// pages after _end.  Real systems would determine the
// amount of memory available in the system and use it all.
rtm's avatar
rtm committed
void
rsc's avatar
rsc committed
kinit(void)
rtm's avatar
rtm committed
{
  extern int end;
rtm's avatar
rtm committed
  char *start;
  initlock(&kalloc_lock, "kalloc");
rsc's avatar
rsc committed
  start = (char*) &end;
  start = (char*) (((uint)start + PAGE) & ~(PAGE-1));
rtm's avatar
rtm committed
  mem = 256; // assume 256 pages of RAM
rtm's avatar
rtm committed
  cprintf("mem = %d\n", mem * PAGE);
  kfree(start, mem * PAGE);
}

void
kfree(char *cp, int len)
{
  struct run **rr;
rsc's avatar
rsc committed
  struct run *p = (struct run*) cp;
  struct run *pend = (struct run*) (cp + len);
rtm's avatar
rtm committed
  int i;
rtm's avatar
rtm committed

  if(len % PAGE)
    panic("kfree");

rtm's avatar
rtm committed
  // XXX fill with junk to help debug
  for(i = 0; i < len; i++)
    cp[i] = 1;

rtm's avatar
rtm committed
  acquire(&kalloc_lock);

rtm's avatar
rtm committed
  rr = &freelist;
  while(*rr){
rsc's avatar
rsc committed
    struct run *rend = (struct run*) ((char*)(*rr) + (*rr)->len);
rtm's avatar
rtm committed
    if(p >= *rr && p < rend)
      panic("freeing free page");
    if(pend == *rr){
      p->len = len + (*rr)->len;
      p->next = (*rr)->next;
      *rr = p;
rtm's avatar
rtm committed
      goto out;
rtm's avatar
rtm committed
    }
    if(pend < *rr){
      p->len = len;
      p->next = *rr;
      *rr = p;
rtm's avatar
rtm committed
      goto out;
rtm's avatar
rtm committed
    }
    if(p == rend){
      (*rr)->len += len;
      if((*rr)->next && (*rr)->next == pend){
        (*rr)->len += (*rr)->next->len;
        (*rr)->next = (*rr)->next->next;
      }
rtm's avatar
rtm committed
      goto out;
rtm's avatar
rtm committed
    }
    rr = &((*rr)->next);
  }
  p->len = len;
  p->next = 0;
  *rr = p;
rtm's avatar
rtm committed

 out:
  release(&kalloc_lock);
rtm's avatar
rtm committed
}

// Allocate n bytes of physical memory.
// Returns a kernel-segment pointer.
// Returns 0 if the memory cannot be allocated.
rsc's avatar
rsc committed
char*
rtm's avatar
rtm committed
kalloc(int n)
{
  struct run **rr;

  if(n % PAGE)
    panic("kalloc");

rtm's avatar
rtm committed
  acquire(&kalloc_lock);

rtm's avatar
rtm committed
  rr = &freelist;
  while(*rr){
    struct run *r = *rr;
    if(r->len == n){
      *rr = r->next;
rtm's avatar
rtm committed
      release(&kalloc_lock);
rsc's avatar
rsc committed
      return (char*) r;
rtm's avatar
rtm committed
    }
    if(r->len > n){
rsc's avatar
rsc committed
      char *p = (char*)r + (r->len - n);
rtm's avatar
rtm committed
      r->len -= n;
rtm's avatar
rtm committed
      release(&kalloc_lock);
rtm's avatar
rtm committed
      return p;
    }
    rr = &(*rr)->next;
  }
rtm's avatar
rtm committed
  release(&kalloc_lock);
rtm's avatar
rtm committed
  cprintf("kalloc: out of memory\n");
rtm's avatar
rtm committed
  return 0;
}