Commit 65bd8e13 authored by rsc

New scheduler.

Removed the cli/sti nesting stack in favor of tracking
the number of locks held on each CPU, with explicit
conditionals in spinlock.c.
parent 40a2a083
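
The heart of the change is in spinlock.c below: the per-CPU cli()/sti()
nesting counter (clis) is gone, and each CPU instead counts the spinlocks
it holds (nlock). Interrupts are enabled on a CPU exactly when that count
is zero. A condensed sketch of the new discipline, simplified from the
real acquire1/release1 further down (which also record the caller's PC
and issue a cpuid memory barrier):

  void
  acquire(struct spinlock *lk)
  {
    if(cpus[cpu()].nlock++ == 0)   // 0 -> 1: first lock taken on this cpu,
      cli();                       // so interrupts go off before we spin
    while(cmpxchg(0, 1, &lk->locked) == 1)
      ;                            // spin until the lock is ours
  }

  void
  release(struct spinlock *lk)
  {
    lk->locked = 0;
    if(--cpus[cpu()].nlock == 0)   // 1 -> 0: last lock on this cpu released,
      sti();                       // so interrupts may come back on
  }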
--- a/console.c
+++ b/console.c
@@ -113,7 +113,7 @@ void
 cprintf(char *fmt, ...)
 {
   int i, state = 0, c;
-  unsigned int *ap = (unsigned int *) &fmt + 1;
+  unsigned int *ap = (unsigned int *)(void*)&fmt + 1;
 
   if(use_console_lock)
     acquire(&console_lock);
--- a/defs.h
+++ b/defs.h
@@ -13,7 +13,6 @@ struct proc;
 struct jmpbuf;
 void setupsegs(struct proc *);
 struct proc * newproc(void);
-void swtch(int);
 struct spinlock;
 void sleep(void *, struct spinlock *);
 void wakeup(void *);
@@ -22,8 +21,6 @@ void proc_exit(void);
 int proc_kill(int);
 int proc_wait(void);
 void yield(void);
-void cli(void);
-void sti(void);
 
 // swtch.S
 struct jmpbuf;
--- a/dot-bochsrc
+++ b/dot-bochsrc
@@ -107,7 +107,7 @@ romimage: file=$BXSHARE/BIOS-bochs-latest, address=0xf0000
 #  650Mhz Athlon K-7 with Linux 2.4.4/egcs-2.91.66   2 to 2.5 Mips
 #  400Mhz Pentium II with Linux 2.0.36/egcs-1.0.3    1 to 1.8 Mips
 #=======================================================================
-cpu: count=2, ips=10000000
+cpu: count=2, ips=10000000, reset_on_triple_fault=0
 #=======================================================================
 # MEGS
--- a/main.c
+++ b/main.c
@@ -18,19 +18,19 @@ extern uint8_t _binary_userfs_start[], _binary_userfs_size[];
 extern int use_console_lock;
 
-struct spinlock sillylock; // hold this to keep interrupts disabled
-
 int
 main()
 {
   struct proc *p;
 
   if (acpu) {
-    cpus[cpu()].clis = 1;
     cprintf("an application processor\n");
     idtinit(); // CPU's idt
     lapic_init(cpu());
     lapic_timerinit();
     lapic_enableintr();
-    sti();
     scheduler();
   }
   acpu = 1;
@@ -40,10 +40,9 @@ main()
 
   mp_init(); // collect info about this machine
 
-  acquire(&sillylock);
   use_console_lock = 1;
-  cpus[cpu()].clis = 1; // cpu starts as if we had called cli()
 
   lapic_init(mp_bcpu());
   cprintf("\nxV6\n\n");
@@ -56,7 +55,7 @@ main()
   // create fake process zero
   p = &proc[0];
   memset(p, 0, sizeof *p);
-  p->state = WAITING;
+  p->state = SLEEPING;
   p->sz = 4 * PAGE;
   p->mem = kalloc(p->sz);
   memset(p->mem, 0, p->sz);
@@ -88,6 +87,7 @@ main()
   //load_icode(p, _binary_userfs_start, (unsigned) _binary_userfs_size);
   p->state = RUNNABLE;
   cprintf("loaded userfs\n");
-  release(&sillylock);
 
   scheduler();
--- a/proc.c
+++ b/proc.c
@@ -12,6 +12,7 @@ struct spinlock proc_table_lock;
 struct proc proc[NPROC];
 struct proc *curproc[NCPU];
 int next_pid = 1;
+extern void forkret(void);
 
 /*
  * set up a process's task state and segment descriptors
@@ -96,12 +97,14 @@ newproc()
   *(np->tf) = *(op->tf);
   np->tf->tf_regs.reg_eax = 0; // so fork() returns 0 in child
 
-  // set up new jmpbuf to start executing at trapret with esp pointing at tf
+  // Set up new jmpbuf to start executing forkret (see trapasm.S)
+  // with esp pointing at tf.  Forkret will call forkret1 (below) to release
+  // the proc_table_lock and then jump into the usual trap return code.
   memset(&np->jmpbuf, 0, sizeof np->jmpbuf);
-  np->jmpbuf.jb_eip = (unsigned) trapret;
+  np->jmpbuf.jb_eip = (unsigned) forkret;
   np->jmpbuf.jb_esp = (unsigned) np->tf - 4; // -4 for the %eip that isn't actually there
 
-  // copy file descriptors
+  // Copy file descriptors
   for(fd = 0; fd < NOFILE; fd++){
     np->fds[fd] = op->fds[fd];
     if(np->fds[fd])
@@ -111,128 +114,153 @@ newproc()
   return np;
 }
 
+void
+forkret1(void)
+{
+  release(&proc_table_lock);
+}
+
+// Per-CPU process scheduler.
+// Each CPU calls scheduler() after setting itself up.
+// Scheduler never returns.  It loops, doing:
+//  - choose a process to run
+//  - longjmp to start running that process
+//  - eventually that process transfers control back
+//      via longjmp back to the top of scheduler.
 void
 scheduler(void)
 {
-  struct proc *op, *np;
+  struct proc *p;
   int i;
 
   cprintf("start scheduler on cpu %d jmpbuf %p\n", cpu(), &cpus[cpu()].jmpbuf);
   cpus[cpu()].lastproc = &proc[0];
 
-  setjmp(&cpus[cpu()].jmpbuf);
-
-  op = curproc[cpu()];
-  if(op == 0 || op->mtx != &proc_table_lock)
-    acquire1(&proc_table_lock, op);
-  if(op){
-    if(op->newstate <= 0 || op->newstate > ZOMBIE)
-      panic("scheduler");
-    op->state = op->newstate;
-    op->newstate = -1;
-    if(op->mtx){
-      struct spinlock *mtx = op->mtx;
-      op->mtx = 0;
-      if(mtx != &proc_table_lock)
-        release1(mtx, op);
-    }
-  }
-
-  // find a runnable process and switch to it
-  curproc[cpu()] = 0;
-  np = cpus[cpu()].lastproc + 1;
-  while(1){
+  for(;;){
+    // Loop over process table looking for process to run.
+    acquire(&proc_table_lock);
     for(i = 0; i < NPROC; i++){
-      if(np >= &proc[NPROC])
-        np = &proc[0];
-      if(np->state == RUNNABLE)
-        break;
-      np++;
-    }
-
-    if(i < NPROC){
-      np->state = RUNNING;
-      release1(&proc_table_lock, op);
-      break;
-    }
-
-    release1(&proc_table_lock, op);
-    op = 0;
-    acquire(&proc_table_lock);
-    np = &proc[0];
-  }
-
-  cpus[cpu()].lastproc = np;
-  curproc[cpu()] = np;
-
-  // h/w sets busy bit in TSS descriptor sometimes, and faults
-  // if it's set in LTR. so clear tss descriptor busy bit.
-  np->gdt[SEG_TSS].sd_type = STS_T32A;
-
-  // XXX should probably have an lgdt() function in x86.h
-  // to confine all the inline assembly.
-  // XXX probably ought to lgdt on trap return too, in case
-  // a system call has moved a program or changed its size.
-  asm volatile("lgdt %0" : : "g" (np->gdt_pd.pd_lim));
-  ltr(SEG_TSS << 3);
-
-  if(0) cprintf("cpu%d: run %d esp=%p callerpc=%p\n", cpu(), np-proc);
-  longjmp(&np->jmpbuf);
-}
+      p = &proc[i];
+      if(p->state != RUNNABLE)
+        continue;
 
-// give up the cpu by switching to the scheduler,
-// which runs on the per-cpu stack.
-void
-swtch(int newstate)
-{
-  struct proc *p = curproc[cpu()];
-  if(p == 0)
-    panic("swtch no proc");
-  if(p->mtx == 0 && p->locks != 0)
-    panic("swtch w/ locks");
-  if(p->mtx && p->locks != 1)
-    panic("swtch w/ locks 1");
-  if(p->mtx && p->mtx->locked == 0)
-    panic("switch w/ lock but not held");
-  if(p->locks && (read_eflags() & FL_IF))
-    panic("swtch w/ lock but FL_IF");
+      // Run this process.
+      // XXX move this into swtch or trapret or something.
+      // It can run on the other stack.
+
+      // h/w sets busy bit in TSS descriptor sometimes, and faults
+      // if it's set in LTR. so clear tss descriptor busy bit.
+      p->gdt[SEG_TSS].sd_type = STS_T32A;
+
+      // XXX should probably have an lgdt() function in x86.h
+      // to confine all the inline assembly.
+      // XXX probably ought to lgdt on trap return too, in case
+      // a system call has moved a program or changed its size.
+      asm volatile("lgdt %0" : : "g" (p->gdt_pd.pd_lim));
+      ltr(SEG_TSS << 3);
+
+      // Switch to chosen process.  It is the process's job
+      // to release proc_table_lock and then reacquire it
+      // before jumping back to us.
+      if(0) cprintf("cpu%d: run %d\n", cpu(), p-proc);
+      curproc[cpu()] = p;
+      p->state = RUNNING;
+      if(setjmp(&cpus[cpu()].jmpbuf) == 0)
+        longjmp(&p->jmpbuf);
 
-  p->newstate = newstate; // basically an argument to scheduler()
-  if(setjmp(&p->jmpbuf) == 0)
-    longjmp(&cpus[cpu()].jmpbuf);
+      // Process is done running for now.
+      // It should have changed its p->state before coming back.
+      curproc[cpu()] = 0;
+      if(p->state == RUNNING)
+        panic("swtch to scheduler with state=RUNNING");
+
+      // XXX if not holding proc_table_lock panic.
+    }
+    release(&proc_table_lock);
+
+    if(cpus[cpu()].nlock != 0)
+      panic("holding locks in scheduler");
+
+    // With proc_table_lock released, there are no
+    // locks held on this cpu, so interrupts are enabled.
+    // Hardware interrupts can happen here.
+    // Also, releasing the lock here lets the other CPUs
+    // look for runnable processes too.
+  }
+}
+
+// Enter scheduler.  Must already hold proc_table_lock
+// and have changed curproc[cpu()]->state.
+void
+sched(void)
+{
+  if(setjmp(&curproc[cpu()]->jmpbuf) == 0)
+    longjmp(&cpus[cpu()].jmpbuf);
 }
 
+// Give up the CPU for one scheduling round.
+void
+yield()
+{
+  struct proc *p;
+
+  if((p=curproc[cpu()]) == 0 || curproc[cpu()]->state != RUNNING)
+    panic("yield");
+  acquire(&proc_table_lock);
+  p->state = RUNNABLE;
+  sched();
+  release(&proc_table_lock);
+}
+
+// Atomically release lock and sleep on chan.
+// Reacquires lock when reawakened.
 void
-sleep(void *chan, struct spinlock *mtx)
+sleep(void *chan, struct spinlock *lk)
 {
   struct proc *p = curproc[cpu()];
 
   if(p == 0)
     panic("sleep");
 
+  // Must acquire proc_table_lock in order to
+  // change p->state and then call sched.
+  // Once we hold proc_table_lock, we can be
+  // guaranteed that we won't miss any wakeup
+  // (wakeup runs with proc_table_lock locked),
+  // so it's okay to release lk.
+  if(lk != &proc_table_lock){
+    acquire(&proc_table_lock);
+    release(lk);
+  }
+
+  // Go to sleep.
   p->chan = chan;
-  p->mtx = mtx; // scheduler will release it
-
-  swtch(WAITING);
+  p->state = SLEEPING;
+  sched();
 
-  if(mtx)
-    acquire(mtx);
+  // Tidy up.
   p->chan = 0;
+
+  // Reacquire original lock.
+  if(lk != &proc_table_lock){
+    release(&proc_table_lock);
+    acquire(lk);
+  }
 }
 
+// Wake up all processes sleeping on chan.
+// Proc_table_lock must be held.
 void
 wakeup1(void *chan)
 {
   struct proc *p;
 
   for(p = proc; p < &proc[NPROC]; p++)
-    if(p->state == WAITING && p->chan == chan)
+    if(p->state == SLEEPING && p->chan == chan)
       p->state = RUNNABLE;
 }
 
+// Wake up all processes sleeping on chan.
+// Proc_table_lock is acquired and released.
 void
 wakeup(void *chan)
 {
@@ -241,15 +269,32 @@ wakeup(void *chan)
   release(&proc_table_lock);
 }
 
-// give up the CPU but stay marked as RUNNABLE
-void
-yield()
-{
-  if(curproc[cpu()] == 0 || curproc[cpu()]->state != RUNNING)
-    panic("yield");
-  swtch(RUNNABLE);
+// Kill the process with the given pid.
+// Process won't actually exit until it returns
+// to user space (see trap in trap.c).
+int
+proc_kill(int pid)
+{
+  struct proc *p;
+
+  acquire(&proc_table_lock);
+  for(p = proc; p < &proc[NPROC]; p++){
+    if(p->pid == pid){
+      p->killed = 1;
+      // Wake process from sleep if necessary.
+      if(p->state == SLEEPING)
+        p->state = RUNNABLE;
+      release(&proc_table_lock);
+      return 0;
+    }
+  }
+  release(&proc_table_lock);
+  return -1;
 }
 
+// Exit the current process.  Does not return.
+// Exited processes remain in the zombie state
+// until their parent calls wait() to find out they exited.
 void
 proc_exit()
 {
@@ -257,6 +302,7 @@ proc_exit()
   struct proc *cp = curproc[cpu()];
   int fd;
 
+  // Close all open files.
   for(fd = 0; fd < NOFILE; fd++){
     if(cp->fds[fd]){
       fd_close(cp->fds[fd]);
@@ -266,91 +312,60 @@ proc_exit()
   acquire(&proc_table_lock);
 
-  // wake up parent
+  // Wake up our parent.
   for(p = proc; p < &proc[NPROC]; p++)
     if(p->pid == cp->ppid)
       wakeup1(p);
 
-  // abandon children
+  // Reparent our children to process 1.
   for(p = proc; p < &proc[NPROC]; p++)
     if(p->ppid == cp->pid)
-      p->pid = 1;
+      p->ppid = 1;
 
-  cp->mtx = &proc_table_lock;
-  swtch(ZOMBIE);
-  panic("a zombie revived");
+  // Jump into the scheduler, never to return.
+  cp->state = ZOMBIE;
+  sched();
+  panic("zombie exit");
 }
 
+// Wait for a child process to exit and return its pid.
+// Return -1 if this process has no children.
 int
 proc_wait(void)
 {
   struct proc *p;
   struct proc *cp = curproc[cpu()];
-  int any, pid;
+  int i, havekids, pid;
 
   acquire(&proc_table_lock);
-
-  while(1){
-    any = 0;
-    for(p = proc; p < &proc[NPROC]; p++){
-      if(p->state == ZOMBIE && p->ppid == cp->pid){
-        kfree(p->mem, p->sz);
-        kfree(p->kstack, KSTACKSIZE);
-        pid = p->pid;
-        p->state = UNUSED;
-        release(&proc_table_lock);
-        return pid;
-      }
-      if(p->state != UNUSED && p->ppid == cp->pid)
-        any = 1;
-    }
-    if(any == 0){
+  for(;;){
+    // Scan through table looking zombie children.
+    havekids = 0;
+    for(i = 0; i < NPROC; i++){
+      p = &proc[i];
+      if(p->ppid == cp->pid){
+        if(p->state == ZOMBIE){
+          // Found one.
+          kfree(p->mem, p->sz);
+          kfree(p->kstack, KSTACKSIZE);
+          pid = p->pid;
+          p->state = UNUSED;
+          p->pid = 0;
+          release(&proc_table_lock);
+          return pid;
+        }
+        havekids = 1;
      }
    }
-
+
+    // No point waiting if we don't have any children.
+    if(!havekids){
       release(&proc_table_lock);
       return -1;
    }
+
+    // Wait for children to exit.  (See wakeup1 call in proc_exit.)
    sleep(cp, &proc_table_lock);
  }
}
-
-int
-proc_kill(int pid)
-{
-  struct proc *p;
-
-  acquire(&proc_table_lock);
-  for(p = proc; p < &proc[NPROC]; p++){
-    if(p->pid == pid && p->state != UNUSED){
-      p->killed = 1;
-      if(p->state == WAITING)
-        p->state = RUNNABLE;
-      release(&proc_table_lock);
-      return 0;
-    }
-  }
-  release(&proc_table_lock);
-  return -1;
-}
-
-// disable interrupts
-void
-cli(void)
-{
-  if(cpus[cpu()].clis == 0)
-    __asm __volatile("cli");
-  cpus[cpu()].clis += 1;
-  if((read_eflags() & FL_IF) != 0)
-    panic("cli but enabled");
-}
-
-// enable interrupts
-void
-sti(void)
-{
-  if((read_eflags() & FL_IF) != 0)
-    panic("sti but enabled");
-  if(cpus[cpu()].clis < 1)
-    panic("sti");
-  cpus[cpu()].clis -= 1;
-  if(cpus[cpu()].clis < 1)
-    __asm __volatile("sti");
-}
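
The setjmp/longjmp pair above implements a coroutine-style handoff between
the per-CPU scheduler stack and each process's kernel stack. These are
xv6's own jmpbuf routines (see swtch.S), which save and restore the
callee-saved registers, %esp, and %eip, not the C library's setjmp/longjmp.
One round trip through yield(), with lock ownership annotated, looks
roughly like this (a control-flow sketch, not compilable code):

  // On the process's kernel stack:
  yield()
    acquire(&proc_table_lock)        // nlock 0 -> 1, interrupts off
    p->state = RUNNABLE
    sched()
      setjmp(&p->jmpbuf) == 0        // save this context...
      longjmp(&cpus[cpu()].jmpbuf)   // ...and jump to the scheduler,
                                     // still holding proc_table_lock

  // On the per-cpu scheduler stack:
    setjmp(&cpus[cpu()].jmpbuf) returns nonzero
    curproc[cpu()] = 0               // p is done for this round
    release(&proc_table_lock)        // loop bottom; interrupts back on
    acquire(&proc_table_lock)        // some later round: p is RUNNABLE again
    p->state = RUNNING
    setjmp(&cpus[cpu()].jmpbuf) == 0
    longjmp(&p->jmpbuf)              // resume p inside sched(),
                                     // handing proc_table_lock back to it

  // Back on the process's kernel stack:
    sched() returns
    release(&proc_table_lock)        // yield() finishes, nlock 1 -> 0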
--- a/proc.h
+++ b/proc.h
@@ -33,7 +33,7 @@ struct jmpbuf {
   int jb_eip;
 };
 
-enum proc_state { UNUSED, EMBRYO, WAITING, RUNNABLE, RUNNING, ZOMBIE };
+enum proc_state { UNUSED, EMBRYO, SLEEPING, RUNNABLE, RUNNING, ZOMBIE };
 
 struct proc{
   char *mem; // start of process's physical memory
@@ -46,7 +46,6 @@ struct proc{
   int ppid;
   void *chan; // sleep
   int killed;
-  int locks; // # of locks currently held
   struct fd *fds[NOFILE];
 
   struct Taskstate ts; // only to give cpu address of kernel stack
@@ -71,7 +70,7 @@ struct cpu {
   struct jmpbuf jmpbuf;
   char mpstack[MPSTACK]; // per-cpu start-up stack, only used to get into main()
   struct proc *lastproc; // last proc scheduled on this cpu (never NULL)
-  int clis; // cli() nesting depth
+  int nlock; // # of locks currently held
 };
 
 extern struct cpu cpus[NCPU];
--- a/spinlock.c
+++ b/spinlock.c
@@ -6,53 +6,47 @@
 #include "proc.h"
 #include "spinlock.h"
 
-#define DEBUG 0
+// Can't call cprintf from inside these routines,
+// because cprintf uses them itself.
+#define cprintf dont_use_cprintf
 
 extern int use_console_lock;
 
-int getcallerpc(void *v) {
-  return ((int*)v)[-1];
+int
+getcallerpc(void *v)
+{
+  return ((int*)v)[-1];
 }
 
 void
 acquire1(struct spinlock * lock, struct proc *cp)
 {
-  if(DEBUG) cprintf("cpu%d: acquiring at %x\n", cpu(), getcallerpc(&lock));
-
-  cli();
-  while ( cmpxchg(0, 1, &lock->locked) == 1 ) { ; }
-  lock->locker_pc = getcallerpc(&lock);
-
-  if(cp)
-    cp->locks += 1;
-
-  if(DEBUG) cprintf("cpu%d: acquired at %x\n", cpu(), getcallerpc(&lock));
+  if(cpus[cpu()].nlock++ == 0)
+    cli();
+  while(cmpxchg(0, 1, &lock->locked) == 1)
+    ;
+  cpuid(0, 0, 0, 0, 0); // memory barrier
+  lock->locker_pc = getcallerpc(&lock);
 }
 
 void
 release1(struct spinlock * lock, struct proc *cp)
 {
-  if(DEBUG) cprintf ("cpu%d: releasing at %x\n", cpu(), getcallerpc(&lock));
-
-  if(lock->locked != 1)
-    panic("release");
-
-  if(cp)
-    cp->locks -= 1;
-
-  cmpxchg(1, 0, &lock->locked);
-  sti();
+  cpuid(0, 0, 0, 0, 0); // memory barrier
+  lock->locked = 0;
+  if(--cpus[cpu()].nlock == 0)
+    sti();
 }
 
 void
 acquire(struct spinlock *lock)
 {
   acquire1(lock, curproc[cpu()]);
 }
 
 void
 release(struct spinlock *lock)
 {
   release1(lock, curproc[cpu()]);
 }
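
The ordering inside acquire1 is what makes the scheme safe: nlock is
incremented and interrupts are disabled before the CPU starts spinning, so
no interrupt handler can run while this CPU holds or is acquiring a lock.
Without that, a handler needing the same lock would spin against its own
CPU. A hypothetical trace of the deadlock this prevents (console_lock is
just an illustrative choice):

  acquire(&console_lock);     // this cpu now owns console_lock
  <timer interrupt fires>     // only possible if interrupts stayed on
    trap() -> cprintf() -> acquire(&console_lock)
                              // spins forever: the holder is this same
                              // cpu, parked beneath the interrupt handler

The panics added in trap.c ("timer interrupt while holding a lock",
"syscall returning locks held") check the same invariant from the other
side.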
--- a/syscall.c
+++ b/syscall.c
@@ -34,8 +34,9 @@ fetchint(struct proc *p, unsigned addr, int *ip)
   return 0;
 }
 
+// This arg is void* so that both int* and uint* can be passed.
 int
-fetcharg(int argno, int *ip)
+fetcharg(int argno, void *ip)
 {
   unsigned esp;
 
--- a/trap.c
+++ b/trap.c
@@ -36,11 +36,6 @@ trap(struct Trapframe *tf)
 {
   int v = tf->tf_trapno;
 
-  if(cpus[cpu()].clis){
-    cprintf("cpu %d v %d eip %x\n", cpu(), v, tf->tf_eip);
-    panic("interrupt while interrupts are off");
-  }
-
   if(v == T_SYSCALL){
     struct proc *cp = curproc[cpu()];
     int num = cp->tf->tf_regs.reg_eax;
@@ -56,12 +51,10 @@ trap(struct Trapframe *tf)
       panic("trap ret but not RUNNING");
     if(tf != cp->tf)
       panic("trap ret wrong tf");
-    if(cp->locks){
+    if(cpus[cpu()].nlock){
       cprintf("num=%d\n", num);
       panic("syscall returning locks held");
     }
-    if(cpus[cpu()].clis)
-      panic("syscall returning but clis != 0");
     if((read_eflags() & FL_IF) == 0)
       panic("syscall returning but FL_IF clear");
     if(read_esp() < (unsigned)cp->kstack ||
@@ -75,7 +68,7 @@ trap(struct Trapframe *tf)
   if(v == (IRQ_OFFSET + IRQ_TIMER)){
     struct proc *cp = curproc[cpu()];
     lapic_timerintr();
-    if(cp && cp->locks)
+    if(cpus[cpu()].nlock)
       panic("timer interrupt while holding a lock");
     if(cp){
 #if 1
--- a/trapasm.S
+++ b/trapasm.S
@@ -1,8 +1,9 @@
 #include "mmu.h"
 
 .text
-.globl alltraps
 .globl trap
+.globl trapret1
+.globl alltraps
 alltraps:
   /* vectors.S sends all traps here */
   pushl %ds # build
@@ -16,11 +18,11 @@ alltraps:
   addl $4, %esp
   # return falls through to trapret...
 
-.globl trapret
 /*
  * a forked process RETs here
  * expects ESP to point to a Trapframe
  */
+.globl trapret
 trapret:
   popal
   popl %es
@@ -28,6 +30,10 @@ trapret:
   addl $0x8, %esp /* trapno and errcode */
   iret
 
+.globl forkret
+forkret:
+  call forkret1
+  jmp trapret
+
 .globl acpu
 acpu:
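
The forkret stub is what lets a newly forked process start life holding,
and then releasing, the proc_table_lock it inherits from the scheduler.
newproc() aims the child's jmpbuf at forkret with %esp pointing at the
trapframe, so the child's first run flows straight into the normal
trap-return path. A sketch of that first switch:

  scheduler()                  // holds proc_table_lock
    longjmp(&p->jmpbuf)        // %eip = forkret, %esp -> p->tf
      forkret:                 // trapasm.S, above
        call forkret1          // proc.c: release(&proc_table_lock)
        jmp trapret            // pop the trapframe registers, iret;
                               // the child resumes in user space with
                               // eax == 0, as set up in newproc()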
--- a/x86.h
+++ b/x86.h
@@ -29,6 +29,8 @@ static __inline uint32_t read_ebp(void) __attribute__((always_inline));
 static __inline uint32_t read_esp(void) __attribute__((always_inline));
 static __inline void cpuid(uint32_t info, uint32_t *eaxp, uint32_t *ebxp, uint32_t *ecxp, uint32_t *edxp);
 static __inline uint64_t read_tsc(void) __attribute__((always_inline));
+static __inline void cli(void) __attribute__((always_inline));
+static __inline void sti(void) __attribute__((always_inline));
 
 static __inline void
 breakpoint(void)
@@ -304,6 +306,18 @@ read_tsc(void)
   return tsc;
 }
 
+static __inline void
+cli(void)
+{
+  __asm__ volatile("cli");
+}
+
+static __inline void
+sti(void)
+{
+  __asm__ volatile("sti");
+}
+
 struct PushRegs {
   /* registers as pushed by pusha */
   uint32_t reg_edi;