#include "types.h"
#include "mmu.h"
#include "x86.h"
#include "param.h"
/*
* set up a process's task state and segment descriptors
* correctly, given its current size and address in memory.
* this should be called whenever the latter change.
* doesn't change the cpu's current segmentation setup.
*/
void
setupsegs(struct proc *p)
{
memset(&p->ts, 0, sizeof(struct Taskstate));
p->ts.ts_ss0 = SEG_KDATA << 3;
p->ts.ts_esp0 = (unsigned)(p->kstack + KSTACKSIZE);
// XXX it may be wrong to modify the current segment table!
p->gdt[0] = SEG_NULL;
p->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0xffffffff, 0);
p->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
p->gdt[SEG_TSS] = SEG16(STS_T32A, (unsigned) &p->ts,
sizeof(p->ts), 0);
p->gdt[SEG_TSS].sd_s = 0;
p->gdt[SEG_UCODE] = SEG(STA_X|STA_R, (unsigned)p->mem, p->sz, 3);
p->gdt[SEG_UDATA] = SEG(STA_W, (unsigned)p->mem, p->sz, 3);
p->gdt_pd.pd__garbage = 0;
p->gdt_pd.pd_lim = sizeof(p->gdt) - 1;
p->gdt_pd.pd_base = (unsigned) p->gdt;
}
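
/*
 * illustration only -- uva2ka() is an assumed helper, not part of
 * this file. with the flat user segments built above, user virtual
 * address va names the byte at p->mem + va, and is valid only
 * while va < p->sz.
 */
#if 0
static char*
uva2ka(struct proc *p, unsigned va)
{
  if(va >= p->sz)
    return 0; // beyond the segment limit
  return p->mem + va;
}
#endif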
extern void trapret();
/*
 * internal fork(). does not copy kernel stack; instead,
 * sets up the stack to return as if from system call.
 */
struct proc *
newproc()
{
  struct proc *op, *np;
  int fd;

  acquire(&proc_table_lock);

  for(np = &proc[1]; np < &proc[NPROC]; np++){
    if(np->state == UNUSED){
      np->state = EMBRYO;
      break;
    }
  }
  if(np >= &proc[NPROC]){
    release(&proc_table_lock);
    return 0;
  }

  // copy from proc[0] if we're bootstrapping
  op = curproc[cpu()];
  if(op == 0)
    op = &proc[0];

  np->pid = next_pid++;
  np->ppid = op->pid;
  np->sz = op->sz;

  release(&proc_table_lock);
  // allocate user memory and a kernel stack for the child
  np->mem = kalloc(np->sz);
  np->kstack = kalloc(KSTACKSIZE);
  if(np->mem == 0 || np->kstack == 0){
    np->state = UNUSED; // XXX leaks whichever allocation succeeded
    return 0;
  }
  memcpy(np->mem, op->mem, np->sz);

  setupsegs(np);

  // set up kernel stack to return to user space
  np->tf = (struct Trapframe *) (np->kstack + KSTACKSIZE - sizeof(struct Trapframe));
  *(np->tf) = *(op->tf); // child starts from a copy of the parent's saved registers...
  np->tf->tf_regs.reg_eax = 0; // ...so fork() returns 0 in child

  // set up new jmpbuf to start executing at trapret with esp pointing at tf
  memset(&np->jmpbuf, 0, sizeof np->jmpbuf);
  np->jmpbuf.jb_eip = (unsigned) trapret;
  np->jmpbuf.jb_esp = (unsigned) np->tf - 4; // -4 for the %eip that isn't actually there

  // copy file descriptors
  for(fd = 0; fd < NOFILE; fd++){
    np->fds[fd] = op->fds[fd];
    if(np->fds[fd])
      fd_incref(np->fds[fd]); // shared with the parent, so bump its refcount
  }

  return np;
}
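
/*
 * sketch of the syscall-side caller (assumed shape; the real
 * sys_fork lives in the syscall layer, not in this file).
 * newproc() leaves the child EMBRYO, so the caller marks it
 * RUNNABLE once it is fully set up.
 */
#if 0
int
sys_fork(void)
{
  struct proc *np;

  if((np = newproc()) == 0)
    return -1;
  np->state = RUNNABLE; // let the scheduler find the child
  return np->pid;
}
#endif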
/*
 * per-cpu scheduler loop: find a RUNNABLE process and longjmp
 * into it. swtch() longjmps back here, on the per-cpu stack.
 */
void
scheduler(void)
{
  struct proc *op, *np;
  int i;

  cprintf("start scheduler on cpu %d jmpbuf %p\n", cpu(), &cpus[cpu()].jmpbuf);
  cpus[cpu()].lastproc = &proc[0];

  setjmp(&cpus[cpu()].jmpbuf);

  op = curproc[cpu()];
  if(op == 0 || op->mtx != &proc_table_lock)
    acquire1(&proc_table_lock, op);

  if(op){
    if(op->newstate <= 0 || op->newstate > ZOMBIE)
      panic("scheduler");
    op->state = op->newstate;
    op->newstate = -1;
    if(op->mtx){
      struct spinlock *mtx = op->mtx;
      op->mtx = 0;
      if(mtx != &proc_table_lock)
        release1(mtx, op);
    }
  }

  // find a runnable process and switch to it
  curproc[cpu()] = 0;
  np = cpus[cpu()].lastproc + 1;
  while(1){
    for(i = 0; i < NPROC; i++){
      if(np >= &proc[NPROC])
        np = &proc[0];
      if(np->state == RUNNABLE)
        break;
      np++;
    }
    if(i < NPROC){
      np->state = RUNNING;
      release1(&proc_table_lock, op);
      break;
    }
    // nothing runnable: drop the lock so another cpu can make
    // progress, then re-acquire and scan again.
    release1(&proc_table_lock, op);
    op = 0;
    acquire(&proc_table_lock);
  }

  cpus[cpu()].lastproc = np;
  curproc[cpu()] = np;

  // h/w sets busy bit in TSS descriptor sometimes, and faults
  // if it's set in LTR. so clear tss descriptor busy bit.
  np->gdt[SEG_TSS].sd_type = STS_T32A;

  // XXX should probably have an lgdt() function in x86.h
  // to confine all the inline assembly.
  // XXX probably ought to lgdt on trap return too, in case
  // a system call has moved a program or changed its size.
  asm volatile("lgdt %0" : : "m" (np->gdt_pd.pd_lim));
  ltr(SEG_TSS << 3);

  if(0) cprintf("cpu%d: run %d\n", cpu(), np - proc);

  longjmp(&np->jmpbuf);
}
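
/*
 * the XXX above asks for an lgdt() wrapper in x86.h; a minimal
 * sketch (assumed helper, not yet in x86.h) would be:
 */
#if 0
static inline void
lgdt(struct Pseudodesc *pd)
{
  // the 6-byte operand starts at pd_lim; pd__garbage is padding
  asm volatile("lgdt %0" : : "m" (pd->pd_lim));
}
#endif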
// give up the cpu by switching to the scheduler,
// which runs on the per-cpu stack.
void
swtch(int newstate)
{
  struct proc *p = curproc[cpu()];

  if(p == 0)
    panic("swtch");
  if(p->mtx && p->locks != 1)
    panic("swtch w/ locks 1");
  if(p->mtx && p->mtx->locked == 0)
    panic("swtch w/ lock but not held");
  if(p->locks && (read_eflags() & FL_IF))
    panic("swtch w/ lock but FL_IF");

  p->newstate = newstate; // basically an argument to scheduler()
  if(setjmp(&p->jmpbuf) == 0)
    longjmp(&cpus[cpu()].jmpbuf);
}

/*
 * give up mtx and wait for a wakeup(chan); the scheduler
 * releases mtx only after marking us WAITING, and sleep()
 * reacquires it before returning.
 */
void
sleep(void *chan, struct spinlock *mtx)
{
  struct proc *p = curproc[cpu()];

  if(p == 0)
    panic("sleep");

  p->chan = chan;
  p->mtx = mtx; // the scheduler will release it
  swtch(WAITING);

  if(mtx)
    acquire(mtx);
  p->chan = 0;
}

/*
 * wake up all processes sleeping on chan.
 * caller must hold proc_table_lock.
 */
void
wakeup1(void *chan)
{
  struct proc *p;

  for(p = proc; p < &proc[NPROC]; p++)
    if(p->state == WAITING && p->chan == chan)
      p->state = RUNNABLE;
}
void
wakeup(void *chan)
{
acquire(&proc_table_lock);
wakeup1(chan);
// give up the CPU but stay marked as RUNNABLE
void
yield()
{
  if(curproc[cpu()] == 0 || curproc[cpu()]->state != RUNNING)
    panic("yield");

  acquire(&proc_table_lock);
  curproc[cpu()]->mtx = &proc_table_lock; // scheduler releases it
  swtch(RUNNABLE);
}
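
/*
 * usage sketch of the sleep/wakeup protocol. q_lock and q_count
 * are made-up names, not part of this tree. passing q_lock to
 * sleep() is what closes the lost-wakeup race: the scheduler
 * releases it only after the process is marked WAITING, so a
 * q_post() between the q_count test and the switch cannot be lost.
 */
#if 0
static struct spinlock q_lock;
static int q_count;

static void
q_wait(void)
{
  acquire(&q_lock);
  while(q_count == 0)
    sleep(&q_count, &q_lock); // q_lock is released while waiting
  q_count -= 1;
  release(&q_lock);
}

static void
q_post(void)
{
  acquire(&q_lock);
  q_count += 1;
  wakeup(&q_count); // wakes any q_wait()er sleeping on &q_count
  release(&q_lock);
}
#endif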
void
proc_exit()
{
  struct proc *p;
  struct proc *cp = curproc[cpu()];
  int fd;

  // close all open file descriptors
  for(fd = 0; fd < NOFILE; fd++){
    if(cp->fds[fd]){
      fd_close(cp->fds[fd]);
      cp->fds[fd] = 0;
    }
  }

  acquire(&proc_table_lock);

  // wake up parent
  for(p = proc; p < &proc[NPROC]; p++)
    if(p->pid == cp->ppid)
      wakeup1(p); // proc_wait() sleeps on the parent's own proc struct

  // abandon children
  for(p = proc; p < &proc[NPROC]; p++)
    if(p->ppid == cp->pid)
      p->ppid = 1; // reparent orphans to pid 1

  // enter the scheduler as a ZOMBIE, never to run again;
  // proc_wait() in the parent frees our memory.
  cp->mtx = &proc_table_lock;
  swtch(ZOMBIE);
  panic("zombie exit");
}
int
proc_wait(void)
{
  struct proc *p;
  struct proc *cp = curproc[cpu()];
  int any, pid;

  acquire(&proc_table_lock);

  while(1){
    any = 0;
    for(p = proc; p < &proc[NPROC]; p++){
      if(p->state == ZOMBIE && p->ppid == cp->pid){
        kfree(p->mem, p->sz);
        kfree(p->kstack, KSTACKSIZE);
        pid = p->pid;
        p->state = UNUSED;
        release(&proc_table_lock);
        return pid;
      }
      if(p->state != UNUSED && p->ppid == cp->pid)
        any = 1;
    }
    // no children at all: there is nothing to wait for
    if(any == 0){
      release(&proc_table_lock);
      return -1;
    }
    // wait for a child to exit; proc_exit() wakes chan == cp
    sleep(cp, &proc_table_lock);
  }
}
int
proc_kill(int pid)
{
  struct proc *p;

  acquire(&proc_table_lock);
  for(p = proc; p < &proc[NPROC]; p++){
    if(p->pid == pid && p->state != UNUSED){
      p->killed = 1;
      // unblock the victim so it can notice it has been killed
      if(p->state == WAITING)
        p->state = RUNNABLE;
      release(&proc_table_lock);
      return 0;
    }
  }
  release(&proc_table_lock);
  return -1;
}
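
/*
 * proc_kill() is advisory: it only sets p->killed and unblocks
 * the victim. somewhere on the victim's path back to user space,
 * a check like this (sketch; the real check belongs in the trap
 * code, not here) must notice the flag:
 */
#if 0
  if(curproc[cpu()] && curproc[cpu()]->killed)
    proc_exit();
#endif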
// disable interrupts
void
cli(void)
{
  // only actually clear FL_IF on the outermost call; nested
  // calls just bump the per-cpu count.
  if(cpus[cpu()].clis == 0)
    asm volatile("cli");
  cpus[cpu()].clis += 1;
  if((read_eflags() & FL_IF) != 0)
    panic("cli but enabled");
}
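
/*
 * usage sketch: cli()/sti() nest via the per-cpu clis count, so a
 * helper can disable interrupts without knowing whether its caller
 * already has. (hypothetical example, not a function in this tree.)
 */
#if 0
static void
nested_example(void)
{
  cli(); // clis 0 -> 1: interrupts go off
  cli(); // clis 1 -> 2: still off
  sti(); // clis 2 -> 1: still off
  sti(); // clis 1 -> 0: interrupts back on
}
#endif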
// enable interrupts
void
sti(void)
{
  if((read_eflags() & FL_IF) != 0)
    panic("sti but enabled");