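// File-scope declarations assumed by the fragments below
// (reconstructed from stock xv6 proc.c of this vintage; details may differ).
#include "types.h"
#include "defs.h"
#include "param.h"
#include "mmu.h"
#include "x86.h"
#include "proc.h"
#include "spinlock.h"

struct {
  struct spinlock lock;
  struct proc proc[NPROC];
} ptable;

static struct proc *initproc;

int nextpid = 1;
extern void forkret(void);
extern void trapret(void);

static void wakeup1(void *chan);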
void
pinit(void)
{
  initlock(&ptable.lock, "ptable");
}
// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and return it.
// Otherwise return 0.
static struct proc*
allocproc(void)
{
  struct proc *p;
  char *sp;

  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == UNUSED)
      goto found;
  release(&ptable.lock);
  return 0;

found:
  p->state = EMBRYO;
  p->pid = nextpid++;
  release(&ptable.lock);

  // Allocate kernel stack if necessary.
  if((p->kstack = kalloc(KSTACKSIZE)) == 0){
    p->state = UNUSED;
    return 0;
  }
  sp = p->kstack + KSTACKSIZE;

  // Leave room for trap frame.
  sp -= sizeof *p->tf;
  p->tf = (struct trapframe*)sp;

  // Set up new context to start executing at forkret,
  // which returns to trapret (see below).
  sp -= 4;
  *(uint*)sp = (uint)trapret;

  sp -= sizeof *p->context;
  p->context = (struct context*)sp;
  memset(p->context, 0, sizeof *p->context);
  p->context->eip = (uint)forkret;

  return p;
}
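// Grow current process's memory by n bytes.
// Return 0 on success, -1 on failure.
// (Opening reconstructed for the fragment below; assumes the pre-paging
// xv6 where a process is one contiguous block cp->mem of cp->sz bytes.)
int
growproc(int n)
{
  char *newmem;

  newmem = kalloc(cp->sz + n);
  if(newmem == 0)
    return -1;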
  memmove(newmem, cp->mem, cp->sz);
  memset(newmem + cp->sz, 0, n);
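  // (Reconstructed continuation: free the old image and install the new one.)
  kfree(cp->mem, cp->sz);
  cp->mem = newmem;
  cp->sz += n;
  usegment();
  return 0;
}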
// Set up CPU's kernel segment descriptors.
// Run once at boot time on each CPU.
void
ksegment(void)
{
  struct cpu *c1;

  c1 = &cpus[cpu()];
  c1->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0x100000 + 64*1024-1, 0);
  c1->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
  c1->gdt[SEG_KCPU] = SEG(STA_W, (uint)(&c1->tls+1), 0xffffffff, 0);
  lgdt(c1->gdt, sizeof(c1->gdt));

  // Initialize cpu-local variables.
  c = c1;
  cp = 0;
}
// Set up CPU's segment descriptors and task state for the current process.
// If cp==0, set up for "idle" state for when scheduler() is running.
void
usegment(void)
{
  c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, (uint)cp->mem, cp->sz-1, DPL_USER);
  c->gdt[SEG_UDATA] = SEG(STA_W, (uint)cp->mem, cp->sz-1, DPL_USER);
  c->gdt[SEG_TSS] = SEG16(STS_T32A, (uint)&c->ts, sizeof(c->ts)-1, 0);
  c->gdt[SEG_TSS].s = 0;
  c->ts.ss0 = SEG_KDATA << 3;
  c->ts.esp0 = (uint)cp->kstack + KSTACKSIZE;
  ltr(SEG_TSS << 3);
}
// Create a new process copying cp as the parent.
// Sets up stack to return as if from system call.
// Caller must set state of returned proc to RUNNABLE.
int
fork(void)
{
  int i, pid;
  struct proc *np;

  // Allocate process.
  if((np = allocproc()) == 0)
    return -1;

  // Copy process state from cp.
  np->sz = cp->sz;
  if((np->mem = kalloc(np->sz)) == 0){
    kfree(np->kstack, KSTACKSIZE);
    np->kstack = 0;
    np->state = UNUSED;
    return -1;
  }
  memmove(np->mem, cp->mem, np->sz);
  np->parent = cp;
  *np->tf = *cp->tf;

  // Clear %eax so that fork returns 0 in the child.
  np->tf->eax = 0;
  for(i = 0; i < NOFILE; i++)
    if(cp->ofile[i])
      np->ofile[i] = filedup(cp->ofile[i]);
  np->cwd = idup(cp->cwd);
  pid = np->pid;
  np->state = RUNNABLE;
  return pid;
}
// Set up first user process.
void
userinit(void)
{
  struct proc *p;
  extern char _binary_initcode_start[], _binary_initcode_size[];
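  // (Reconstructed middle of userinit(), stock pre-paging xv6; the exact
  // page-size constant may differ.)
  p = allocproc();
  initproc = p;

  // One page of memory, initialized from initcode.S.
  p->sz = 4096;
  p->mem = kalloc(p->sz);
  memset(p->mem, 0, p->sz);
  memmove(p->mem, _binary_initcode_start, (int)_binary_initcode_size);

  // Trap frame for the first return to user space.
  memset(p->tf, 0, sizeof(*p->tf));
  p->tf->cs = (SEG_UCODE << 3) | DPL_USER;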
  p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
  p->tf->es = p->tf->ds;
  p->tf->ss = p->tf->ds;
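  // (Reconstructed remainder.)
  p->tf->eflags = FL_IF;
  p->tf->esp = p->sz;
  p->tf->eip = 0;  // beginning of initcode.S

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");
  p->state = RUNNABLE;
}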
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//      via swtch back to the scheduler.
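// (Reconstructed scheduler() for the comment above, stock xv6 of this
// vintage; the name of the cpu's saved-context field is assumed.)
void
scheduler(void)
{
  struct proc *p;

  for(;;){
    // Enable interrupts on this processor.
    sti();

    // Loop over process table looking for process to run.
    acquire(&ptable.lock);
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->state != RUNNABLE)
        continue;

      // Switch to chosen process.  It is the process's job
      // to release ptable.lock and then reacquire it
      // before jumping back to us.
      cp = p;
      usegment();
      p->state = RUNNING;
      swtch(&c->scheduler, p->context);

      // Process is done running for now.
      // It should have changed its p->state before coming back.
      cp = 0;
      usegment();
    }
    release(&ptable.lock);
  }
}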
// Enter scheduler.  Must hold only ptable.lock
// and have changed cp->state.
void
sched(void)
{
  int intena;

  if(cp->state == RUNNING)
    panic("sched running");
  if(readeflags()&FL_IF)
    panic("sched interruptible");
  intena = c->intena;
  swtch(&cp->context, c->scheduler);
  c->intena = intena;
}
  // Return to "caller", actually trapret (see allocproc).
}
// Atomically release lock and sleep on chan.
// Reacquires lock when reawakened.
void
sleep(void *chan, struct spinlock *lk)
{
  if(lk != &ptable.lock){  //DOC: sleeplock0
    acquire(&ptable.lock);  //DOC: sleeplock1
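    // (Reconstructed remainder of sleep(), stock xv6.)
    release(lk);
  }

  // Go to sleep.
  cp->chan = chan;
  cp->state = SLEEPING;
  sched();

  // Tidy up.
  cp->chan = 0;

  // Reacquire original lock.
  if(lk != &ptable.lock){  //DOC: sleeplock2
    release(&ptable.lock);
    acquire(lk);
  }
}

// Wake up all processes sleeping on chan.
// The ptable lock must be held.  (Reconstructed helper; the exit
// fragment below calls it.)
static void
wakeup1(void *chan)
{
  struct proc *p;

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == SLEEPING && p->chan == chan)
      p->state = RUNNABLE;
}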
// Kill the process with the given pid.
// Process won't actually exit until it returns
// to user space (see trap in trap.c).
int
kill(int pid)
{
  struct proc *p;
  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->pid == pid){
      p->killed = 1;
      // Wake process from sleep if necessary.
      if(p->state == SLEEPING)
        p->state = RUNNABLE;
      release(&ptable.lock);
      return 0;
    }
  }
  release(&ptable.lock);
  return -1;
}
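// Exit the current process.  Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
// (Comment and signature reconstructed for the body below.)
void
exit(void)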
{
  struct proc *p;
  int fd;
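  // (Reconstructed middle of exit(), stock xv6: release per-process
  // resources, wake the parent, and begin handing children to init.)

  // Close all open files.
  for(fd = 0; fd < NOFILE; fd++){
    if(cp->ofile[fd]){
      fileclose(cp->ofile[fd]);
      cp->ofile[fd] = 0;
    }
  }

  iput(cp->cwd);
  cp->cwd = 0;

  acquire(&ptable.lock);

  // Parent might be sleeping in wait().
  wakeup1(cp->parent);

  // Pass abandoned children to init.
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->parent == cp){
      p->parent = initproc;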
      if(p->state == ZOMBIE)
        wakeup1(initproc);
    }
  }
  // Jump into the scheduler, never to return.
  cp->state = ZOMBIE;
  sched();
  panic("zombie exit");
}
// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
wait(void)
{
  struct proc *p;
  int havekids, pid;

  acquire(&ptable.lock);
  for(;;){
    // Scan through table looking for zombie children.
    havekids = 0;
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->parent != cp)
        continue;
      havekids = 1;
      if(p->state == ZOMBIE){
        // Found one.
        pid = p->pid;
        kfree(p->mem, p->sz);
        kfree(p->kstack, KSTACKSIZE);
        p->state = UNUSED;
        p->pid = 0;
        p->parent = 0;
        p->name[0] = 0;
        p->killed = 0;
        release(&ptable.lock);
        return pid;
      }
    }
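    // (Reconstructed remainder of wait(), stock xv6.)
    // No point waiting if we don't have any children.
    if(!havekids || cp->killed){
      release(&ptable.lock);
      return -1;
    }

    // Wait for children to exit.  (See wakeup1 call in exit.)
    sleep(cp, &ptable.lock);  //DOC: wait-sleep
  }
}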
// Print a process listing to console. For debugging.
// Runs when user types ^P on console.
// No lock to avoid wedging a stuck machine further.
void
procdump(void)
{
  static char *states[] = {
  [UNUSED]    "unused",
  [EMBRYO]    "embryo",
  [SLEEPING]  "sleep ",
  [RUNNABLE]  "runble",
  [RUNNING]   "run   ",
  [ZOMBIE]    "zombie"
  };
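  // (Reconstructed body, stock xv6 minus the saved-PC backtrace.)
  struct proc *p;
  char *state;

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->state == UNUSED)
      continue;
    if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
      state = states[p->state];
    else
      state = "???";
    cprintf("%d %s %s\n", p->pid, state, p->name);
  }
}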