#include "types.h"
#include "mmu.h"
#include "x86.h"
#include "param.h"
struct spinlock proc_table_lock;
extern void forkret1(struct trapframe*);
void
pinit(void)
{
initlock(&proc_table_lock, "proc_table");
}
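// (Assumption, for context: pinit() is expected to be called once from
// main() during boot, before allocproc/scheduler first use
// proc_table_lock; that call site is not part of this file.)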
// Set up CPU's segment descriptors and task state for a
// given process.
// If p==0, set up for "idle" state for when scheduler()
// is idling, not running any process.
void
setupsegs(struct proc *p)
{
  struct cpu *c = &cpus[cpu()];
c->ts.ss0 = SEG_KDATA << 3;
if(p){
c->ts.esp0 = (uint)(p->kstack + KSTACKSIZE);
} else {
c->ts.esp0 = 0xffffffff;
}
c->gdt[0] = SEG_NULL;
c->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0x100000 + 64*1024, 0); // xxx
c->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
c->gdt[SEG_TSS] = SEG16(STS_T32A, (uint) &c->ts, sizeof(c->ts), 0);
c->gdt[SEG_TSS].s = 0;
if(p){
c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, (uint)p->mem, p->sz, 3);
c->gdt[SEG_UDATA] = SEG(STA_W, (uint)p->mem, p->sz, 3);
} else {
c->gdt[SEG_UCODE] = SEG_NULL;
c->gdt[SEG_UDATA] = SEG_NULL;
}
lgdt(c->gdt, sizeof c->gdt);
  ltr(SEG_TSS << 3);
}
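// Illustrative note: SEG_UCODE/SEG_UDATA above give the process a
// zero-based address space of size p->sz backed by the contiguous
// memory at p->mem, so a user virtual address va corresponds to the
// kernel pointer p->mem + va and is only meaningful for va < p->sz:
//
//   char *kva = p->mem + va;   // sketch only; valid iff va < p->sz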
// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and return it.
// Otherwise return 0.
struct proc*
allocproc(void)
{
int i;
struct proc *p;
for(i = 0; i < NPROC; i++){
p = &proc[i];
if(p->state == UNUSED){
p->state = EMBRYO;
return p;
}
}
return 0;
}
// Create a new process copying p as the parent.
// Does not copy the kernel stack.
// Instead, sets up stack to return as if from system call.
// Caller must arrange for process to run (set state to RUNNABLE).
struct proc*
copyproc(struct proc *p)
{
  struct proc *np;

  // Allocate process slot and kernel stack.
  if((np = allocproc()) == 0)
    return 0;
  if((np->kstack = kalloc(KSTACKSIZE)) == 0){
    np->state = UNUSED;
    return 0;
  }

  // Trapframe sits at the top of the new kernel stack; copy the
  // parent's saved registers, then arrange for the first switch to
  // the child to "return" through forkret.
  np->tf = (struct trapframe*)(np->kstack + KSTACKSIZE) - 1;
  *np->tf = *p->tf;
  memset(&np->jmpbuf, 0, sizeof np->jmpbuf);
  np->jmpbuf.eip = (uint)forkret;
  np->jmpbuf.esp = (uint)np->tf - 4;

  // Copy the parent's memory image.
  np->sz = p->sz;
  np->mem = kalloc(np->sz);
  if(np->mem == 0){
    kfree(np->kstack, KSTACKSIZE);
    np->state = UNUSED;
    return 0;
  }
  memmove(np->mem, p->mem, np->sz);

  np->pid = next_pid++;
  np->ppid = p->pid;
  return np;
}
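// Usage sketch (assumption: not part of this file — a fork-style system
// call elsewhere in the kernel would drive copyproc roughly like this):
//
//   struct proc *np;
//   if((np = copyproc(curproc[cpu()])) == 0)
//     return -1;
//   np->state = RUNNABLE;   // caller makes the child runnable
//   return np->pid;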
// Grow current process's memory by n bytes.
// Return old size on success, -1 on failure.
int
growproc(int n)
{
  struct proc *cp = curproc[cpu()];
  char *newmem, *oldmem;

  newmem = kalloc(cp->sz + n);
  if(newmem == 0)
    return -1;
  memmove(newmem, cp->mem, cp->sz);
  memset(newmem + cp->sz, 0, n);
  oldmem = cp->mem;
  cp->mem = newmem;
  kfree(oldmem, cp->sz);
  cp->sz += n;
  setupsegs(cp);
  return cp->sz - n;
}
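// Usage sketch (assumption: not part of this file — an sbrk-style system
// call would typically return the old break on success):
//
//   int addr;
//   if((addr = growproc(n)) < 0)
//     return -1;
//   return addr;   // old size, i.e. the start of the newly added bytes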
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run
//  - longjmp to start running that process
//  - eventually that process transfers control back
//    via longjmp back to the top of scheduler.
void
scheduler(void)
{
  struct proc *p;

  for(;;){
    // Loop over process table looking for process to run.
    acquire(&proc_table_lock);
    for(p = proc; p < &proc[NPROC]; p++){
      if(p->state != RUNNABLE)
        continue;

      // Switch to chosen process.  It is the process's job
      // to release proc_table_lock and then reacquire it
      // before jumping back to us.
      setupsegs(p);
      curproc[cpu()] = p;
      p->state = RUNNING;
      if(setjmp(&cpus[cpu()].jmpbuf) == 0)
        longjmp(&p->jmpbuf);

      // Process is done running for now.
      // It should have changed its p->state before coming back.
      curproc[cpu()] = 0;
      setupsegs(0);
    }
    release(&proc_table_lock);
  }
}
// Enter scheduler.  Must already hold proc_table_lock
// and have changed curproc[cpu()]->state.
void
sched(void)
{
  struct proc *p = curproc[cpu()];

  if(setjmp(&p->jmpbuf) == 0)
    longjmp(&cpus[cpu()].jmpbuf);
}

// Give up the CPU for one scheduling round.
void
yield(void)
{
  struct proc *p = curproc[cpu()];

  acquire(&proc_table_lock);
  p->state = RUNNABLE;
  sched();
  release(&proc_table_lock);
}
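// Usage sketch (assumption: not part of this file — the clock-interrupt
// handler in trap.c would call yield() so each timer tick rotates the
// RUNNABLE processes):
//
//   if(curproc[cpu()])
//     yield();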
// A fork child's very first scheduling by scheduler()
// will longjmp here. "return" to user space.
void
forkret(void)
{
// Still holding proc_table_lock from scheduler.
release(&proc_table_lock);
// Jump into assembly, never to return.
forkret1(curproc[cpu()]->tf);
}
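// (Assumption, for context: forkret1 is an assembly routine that points
// the stack at the new process's trapframe and runs the common
// trap-return path, popping the saved registers and executing iret
// into user space.)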
// Atomically release lock and sleep on chan.
// Reacquires lock when reawakened.
void
sleep(void *chan, struct spinlock *lk)
{
  struct proc *p = curproc[cpu()];

  if(p == 0)
    panic("sleep");

  // Must acquire proc_table_lock in order to
  // change p->state and then call sched.
  // Once we hold proc_table_lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup runs with proc_table_lock locked),
  // so it's okay to release lk.
  if(lk != &proc_table_lock){
    acquire(&proc_table_lock);
    release(lk);
  }

  // Go to sleep.
  p->chan = chan;
  p->state = SLEEPING;
  sched();

  // Tidy up.
  p->chan = 0;

  // Reacquire original lock.
  if(lk != &proc_table_lock){
    release(&proc_table_lock);
    acquire(lk);
  }
}
// Wake up all sleeping processes waiting on chan.
// Proc_table_lock must be held.
void
wakeup1(void *chan)
{
  struct proc *p;

  for(p = proc; p < &proc[NPROC]; p++)
    if(p->state == SLEEPING && p->chan == chan)
      p->state = RUNNABLE;
}

// Wake up all processes sleeping on chan.
// Proc_table_lock is acquired and released.
void
wakeup(void *chan)
{
  acquire(&proc_table_lock);
  wakeup1(chan);
  release(&proc_table_lock);
}
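// Usage sketch (illustrative, not from this file; "q" is a hypothetical
// structure protected by its own spinlock): a consumer sleeps on a
// channel while holding its lock, and a producer wakes it:
//
//   acquire(&q->lock);
//   while(q->empty)
//     sleep(q, &q->lock);    // lock is released while asleep, then retaken
//   ...use the data...
//   release(&q->lock);
//
//   // producer side:
//   acquire(&q->lock);
//   q->empty = 0;
//   wakeup(q);
//   release(&q->lock);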
// Kill the process with the given pid.
// Process won't actually exit until it returns
// to user space (see trap in trap.c).
int
proc_kill(int pid)
{
  struct proc *p;
acquire(&proc_table_lock);
for(p = proc; p < &proc[NPROC]; p++){
if(p->pid == pid){
p->killed = 1;
// Wake process from sleep if necessary.
if(p->state == SLEEPING)
p->state = RUNNABLE;
release(&proc_table_lock);
return 0;
}
}
release(&proc_table_lock);
  return -1;
}
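// Usage sketch (assumption: not part of this file — the trap handler
// checks the flag on the way back to user space, as the comment above
// proc_kill describes):
//
//   if(cp && cp->killed)
//     proc_exit();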
// Exit the current process.  Does not return.
// Exited processes remain in a zombie state until their parent
// calls proc_wait() to learn that they exited.
void
proc_exit(void)
{
  struct proc *p;
  struct proc *cp = curproc[cpu()];
  int fd;

  // Close all open files.
  for(fd = 0; fd < NOFILE; fd++){
    if(cp->fds[fd]){
      fd_close(cp->fds[fd]);
      cp->fds[fd] = 0;
    }
  }

  acquire(&proc_table_lock);

  // Wake up our parent.
  for(p = proc; p < &proc[NPROC]; p++)
    if(p->pid == cp->ppid)
      wakeup1(p);

  // Abandon our children to init (pid 1).
  for(p = proc; p < &proc[NPROC]; p++)
    if(p->ppid == cp->pid)
      p->ppid = 1;

  // Jump into the scheduler, never to return.
  cp->state = ZOMBIE;
  sched();
  panic("zombie exit");
}
// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
proc_wait(void)
{
  struct proc *p;
  struct proc *cp = curproc[cpu()];
  int havekids, pid;

  acquire(&proc_table_lock);
  for(;;){
    // Scan the process table looking for zombie children.
    havekids = 0;
    for(p = proc; p < &proc[NPROC]; p++){
      if(p->state == UNUSED || p->ppid != cp->pid)
        continue;
      if(p->state == ZOMBIE){
        // Found one.
        kfree(p->mem, p->sz);
        kfree(p->kstack, KSTACKSIZE);
        pid = p->pid;
        p->state = UNUSED;
        p->pid = 0;
        p->ppid = 0;
        release(&proc_table_lock);
        return pid;
      }
      havekids = 1;
    }

    // No point waiting if we don't have any children.
    if(!havekids){
      release(&proc_table_lock);
      return -1;
    }

    // Wait for a child to exit (see the wakeup1 call in proc_exit).
    sleep(cp, &proc_table_lock);
  }
}