void
pinit(void)
{
  initlock(&ptable.lock, "ptable");
}
//PAGEBREAK: 36
// Print a process listing to console. For debugging.
// Runs when user types ^P on console.
// No lock to avoid wedging a stuck machine further.
void
procdump(void)
{
  static char *states[] = {
  [UNUSED]    = "unused",
  [EMBRYO]    = "embryo",
  [SLEEPING]  = "sleep ",
  [RUNNABLE]  = "runble",
  [RUNNING]   = "run   ",
  [ZOMBIE]    = "zombie"
  };
  int i;
  struct proc *p;
  char *state;
  uint pc[10];

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->state == UNUSED)
      continue;
    if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
      state = states[p->state];
    else
      state = "???";
    cprintf("%d %s %s", p->pid, state, p->name);
    if(p->state == SLEEPING){
      getcallerpcs((uint*)p->context->ebp+2, pc);
      for(i=0; i<10 && pc[i] != 0; i++)
        cprintf(" %p", pc[i]);
    }
    cprintf("\n");
  }
}
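// For reference: NELEM above bounds the states[] lookup so that a
// corrupted p->state prints "???" instead of indexing off the end of the
// array.  A sketch of the usual defs.h definition:
//
//   #define NELEM(x) (sizeof(x)/sizeof((x)[0]))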
// Set up CPU's kernel segment descriptors.
// Run once at boot time on each CPU.
void
ksegment(void)
{
  struct cpu *c1;

  c1 = &cpus[cpu()];
  c1->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0x100000 + 64*1024-1, 0);
  c1->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
  c1->gdt[SEG_KCPU] = SEG(STA_W, (uint)(&c1->tls+1), 0xffffffff, 0);
  lgdt(c1->gdt, sizeof(c1->gdt));
  loadfsgs(SEG_KCPU << 3);

  // Initialize cpu-local variables.
  c = c1;
  cp = 0;
}
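// How kernel code reaches the cpu-local variables `c` and `cp` after the
// loadfsgs above: a sketch, not the xv6 source.  SEG_KCPU's base points
// into this CPU's per-cpu storage, so the compiler can address cpu-locals
// relative to %fs/%gs.  proc.h declares something like the following; the
// exact offsets are assumptions here:
//
//   extern struct cpu *c asm("%gs:0");    // &cpus[cpu()], this CPU
//   extern struct proc *cp asm("%gs:4");  // process running on this CPU
//
// With that, `c = c1; cp = 0;` compiles to plain moves through the
// segment register rather than a table lookup on every use.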
// Set up CPU's segment descriptors and current process task state.
// If cp==0, set up for "idle" state for when scheduler() is running.
void
usegment(void)
{
  pushcli();
  c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, (uint)cp->mem, cp->sz-1, DPL_USER);
  c->gdt[SEG_UDATA] = SEG(STA_W, (uint)cp->mem, cp->sz-1, DPL_USER);
  c->gdt[SEG_TSS] = SEG16(STS_T32A, (uint)&c->ts, sizeof(c->ts)-1, 0);
  c->gdt[SEG_TSS].s = 0;
  c->ts.ss0 = SEG_KDATA << 3;
  c->ts.esp0 = (uint)cp->kstack + KSTACKSIZE;
  ltr(SEG_TSS << 3);
  popcli();
}
//PAGEBREAK: 15
// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and return it.
// Otherwise return 0.
static struct proc*
allocproc(void)
{
  struct proc *p;
  char *sp;

  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == UNUSED)
      goto found;
  release(&ptable.lock);
  return 0;

found:
  p->state = EMBRYO;
  p->pid = nextpid++;
  release(&ptable.lock);

  // Allocate kernel stack if necessary.
  if((p->kstack = kalloc(KSTACKSIZE)) == 0){
    p->state = UNUSED;
    return 0;
  }
  sp = p->kstack + KSTACKSIZE;

  // Leave room for trap frame.
  sp -= sizeof *p->tf;
  p->tf = (struct trapframe*)sp;

  // Set up new context to start executing at forkret,
  // which returns to trapret (see below).
  sp -= 4;
  *(uint*)sp = (uint)trapret;

  sp -= sizeof *p->context;
  p->context = (struct context*)sp;
  memset(p->context, 0, sizeof *p->context);
  p->context->eip = (uint)forkret;
  return p;
}
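// Kernel stack built by allocproc, from high to low addresses (a diagram
// of the code above, not additional source):
//
//   p->kstack + KSTACKSIZE   <- initial sp
//   [ struct trapframe ]     <- p->tf, filled in by userinit()/fork()
//   [ address of trapret ]   <- return address seen by forkret
//   [ struct context ]       <- p->context, eip = forkret
//   ...free stack space...
//   p->kstack
//
// So the scheduler's first swtch into this process "returns" to forkret,
// forkret returns to trapret, and trapret pops p->tf to enter user space.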
// Set up first user process.
void
userinit(void)
{
  struct proc *p;
  extern char _binary_initcode_start[], _binary_initcode_size[];

  p = allocproc();
  initproc = p;

  // Initialize memory from initcode.S
  p->sz = PAGE;
  p->mem = kalloc(p->sz);
  memset(p->mem, 0, p->sz);
  memmove(p->mem, _binary_initcode_start, (int)_binary_initcode_size);

  memset(p->tf, 0, sizeof(*p->tf));
  p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
  p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
  p->tf->es = p->tf->ds;
  p->tf->ss = p->tf->ds;
  p->tf->eflags = FL_IF;
  p->tf->esp = p->sz;
  p->tf->eip = 0;  // beginning of initcode.S

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");

  p->state = RUNNABLE;
}
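// What the copied initcode does, as a C-level sketch (the real thing is
// initcode.S, hand-written assembly; treat the names and exact path as
// assumptions, not the source):
//
//   char *argv[] = { "init", 0 };
//   exec("/init", argv);   // traps into the kernel with SYS_exec
//
// It starts at eip 0 with esp = p->sz, matching the trap frame set above.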
// Grow current process's memory by n bytes.
// Return 0 on success, -1 on failure.
int
growproc(int n)
{
  char *newmem;

  newmem = kalloc(cp->sz + n);
  if(newmem == 0)
    return -1;
  memmove(newmem, cp->mem, cp->sz);
  memset(newmem + cp->sz, 0, n);
  kfree(cp->mem, cp->sz);
  cp->mem = newmem;
  cp->sz += n;
  usegment();
  return 0;
}

// Create a new process copying cp as the parent.
// Sets up stack to return as if from system call.
// Caller must set state of returned proc to RUNNABLE.
int
fork(void)
{
  int i, pid;
  struct proc *np;

  // Allocate process.
  if((np = allocproc()) == 0)
    return -1;

  // Copy process state from cp.
  np->sz = cp->sz;
  if((np->mem = kalloc(np->sz)) == 0){
    kfree(np->kstack, KSTACKSIZE);
    np->kstack = 0;
    np->state = UNUSED;
    return -1;
  }
  memmove(np->mem, cp->mem, np->sz);
  np->parent = cp;
  *np->tf = *cp->tf;

  // Clear %eax so that fork returns 0 in the child.
  np->tf->eax = 0;

  for(i = 0; i < NOFILE; i++)
    if(cp->ofile[i])
      np->ofile[i] = filedup(cp->ofile[i]);
  np->cwd = idup(cp->cwd);

  pid = np->pid;
  np->state = RUNNABLE;
  return pid;
}
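// Example (a user-program sketch, not part of proc.c): how fork's two
// return values look from user space, assuming the xv6 user library
// (types.h/user.h and its fork/wait/printf/exit).
//
//   #include "types.h"
//   #include "user.h"
//
//   int
//   main(void)
//   {
//     int pid = fork();
//     if(pid == 0){
//       printf(1, "child\n");   // fork returned 0: np->tf->eax was cleared
//       exit();
//     }
//     wait();
//     printf(1, "parent of %d\n", pid);
//     exit();
//   }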
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//    via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p;

  for(;;){
    // Enable interrupts on this processor.
    sti();

    // Loop over process table looking for process to run.
    acquire(&ptable.lock);
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->state != RUNNABLE)
        continue;

      // Switch to chosen process.  It is the process's job
      // to release ptable.lock and then reacquire it
      // before jumping back to us.
      cp = p;
      usegment();
      p->state = RUNNING;
      swtch(&c->context, p->context);

      // Process is done running for now.
      // It should have changed its p->state before coming back.
      cp = 0;
      usegment();
    }
    release(&ptable.lock);
  }
}

// Enter scheduler.  Must hold only ptable.lock
// and have changed cp->state.
void
sched(void)
{
  int intena;

  if(!holding(&ptable.lock))
    panic("sched ptable.lock");
  if(cp->state == RUNNING)
    panic("sched running");
  if(readeflags()&FL_IF)
    panic("sched interruptible");

  intena = c->intena;
  swtch(&cp->context, c->context);
  c->intena = intena;
}
// A fork child's very first scheduling by scheduler()
// will swtch here.  "Return" to user space.
void
forkret(void)
{
  // Still holding ptable.lock from scheduler.
  release(&ptable.lock);

  // Return to "caller", actually trapret (see allocproc).
}
// Atomically release lock and sleep on chan.
// Reacquires lock when reawakened.
void
sleep(void *chan, struct spinlock *lk)
{
  if(cp == 0)
    panic("sleep");
  if(lk == 0)
    panic("sleep without lk");

  // Must acquire ptable.lock in order to
  // change cp->state and then call sched.
  // Once we hold ptable.lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup runs with ptable.lock locked),
  // so it's okay to release lk.
  if(lk != &ptable.lock){  //DOC: sleeplock0
    acquire(&ptable.lock);  //DOC: sleeplock1
    release(lk);
  }

  // Go to sleep.
  cp->chan = chan;
  cp->state = SLEEPING;
  sched();

  // Tidy up.
  cp->chan = 0;

  // Reacquire original lock.
  if(lk != &ptable.lock){
    release(&ptable.lock);
    acquire(lk);
  }
}
// Kill the process with the given pid.
// Process won't actually exit until it returns
// to user space (see trap in trap.c).
int
kill(int pid)
{
  struct proc *p;

  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->pid == pid){
      p->killed = 1;
      // Wake process from sleep if necessary.
      if(p->state == SLEEPING)
        p->state = RUNNABLE;
      release(&ptable.lock);
      return 0;
    }
  }
  release(&ptable.lock);
  return -1;
}
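// Example (a user-program sketch, not part of proc.c): kill() above only
// sets p->killed and wakes a sleeper; the victim actually exits on its
// next pass through trap().  Assumes the xv6 user library.
//
//   #include "types.h"
//   #include "user.h"
//
//   int
//   main(void)
//   {
//     int pid = fork();
//     if(pid == 0)
//       for(;;);      // child spins in user space
//     kill(pid);      // mark the child killed...
//     wait();         // ...and reap it once trap() forces the exit
//     exit();
//   }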
// Exit the current process.  Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
void
exit(void)
{
  struct proc *p;
  int fd;

  if(cp == initproc)
    panic("init exiting");

  // Close all open files.
  for(fd = 0; fd < NOFILE; fd++){
    if(cp->ofile[fd]){
      fileclose(cp->ofile[fd]);
      cp->ofile[fd] = 0;
    }
  }

  iput(cp->cwd);
  cp->cwd = 0;

  acquire(&ptable.lock);

  // Parent might be sleeping in wait().
  wakeup1(cp->parent);

  // Pass abandoned children to init.
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->parent == cp){
      p->parent = initproc;
      if(p->state == ZOMBIE)
        wakeup1(initproc);
    }
  }

  // Jump into the scheduler, never to return.
  cp->state = ZOMBIE;
  sched();
  panic("zombie exit");
}
// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
wait(void)
{
  struct proc *p;
  int havekids, pid;

  acquire(&ptable.lock);
  for(;;){
    // Scan through table looking for zombie children.
    havekids = 0;
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->parent != cp)
        continue;
      havekids = 1;
      if(p->state == ZOMBIE){
        // Found one.
        pid = p->pid;
        kfree(p->mem, p->sz);
        kfree(p->kstack, KSTACKSIZE);
        p->state = UNUSED;
        p->pid = 0;
        p->parent = 0;
        p->name[0] = 0;
        p->killed = 0;
        release(&ptable.lock);
        return pid;