Commit 917043b4 authored by Irene Y Zhang

running gofmt over everything

parent f9758b35
......@@ -8,7 +8,6 @@ type Clerk struct {
// You will have to modify this struct.
}
func MakeClerk(servers []string) *Clerk {
ck := new(Clerk)
ck.servers = servers
......
......@@ -6,6 +6,7 @@ const (
OK = "OK"
ErrNoKey = "ErrNoKey"
)
type Err string
type PutArgs struct {
......
......@@ -11,7 +11,7 @@ import "syscall"
import "encoding/gob"
import "math/rand"
const Debug=0
const Debug = 0
func DPrintf(format string, a ...interface{}) (n int, err error) {
if Debug > 0 {
......@@ -20,7 +20,6 @@ func DPrintf(format string, a ...interface{}) (n int, err error) {
return
}
type Op struct {
// Your definitions here.
// Field names must start with capital letters,
......@@ -38,7 +37,6 @@ type KVPaxos struct {
// Your definitions here.
}
func (kv *KVPaxos) Get(args *GetArgs, reply *GetReply) error {
// Your code here.
return nil
......@@ -81,13 +79,12 @@ func StartServer(servers []string, me int) *KVPaxos {
kv.px = paxos.Make(servers, me, rpcs)
os.Remove(servers[me])
l, e := net.Listen("unix", servers[me]);
l, e := net.Listen("unix", servers[me])
if e != nil {
log.Fatal("listen error: ", e);
log.Fatal("listen error: ", e)
}
kv.l = l
// please do not change any of the following code,
// or do anything to subvert it.
......@@ -95,10 +92,10 @@ func StartServer(servers []string, me int) *KVPaxos {
for kv.dead == false {
conn, err := kv.l.Accept()
if err == nil && kv.dead == false {
if kv.unreliable && (rand.Int63() % 1000) < 100 {
if kv.unreliable && (rand.Int63()%1000) < 100 {
// discard the request.
conn.Close()
} else if kv.unreliable && (rand.Int63() % 1000) < 200 {
} else if kv.unreliable && (rand.Int63()%1000) < 200 {
// process the request but force discard of reply.
c1 := conn.(*net.UnixConn)
f, _ := c1.File()
......@@ -122,4 +119,3 @@ func StartServer(servers []string, me int) *KVPaxos {
return kv
}
......@@ -98,7 +98,7 @@ func TestBasic(t *testing.T) {
}(nth)
}
for nth := 0; nth < npara; nth++ {
<- ca[nth]
<-ca[nth]
}
var va [nservers]string
for i := 0; i < nservers; i++ {
......@@ -155,7 +155,7 @@ func TestDone(t *testing.T) {
value[j] = byte((rand.Int() % 100) + 1)
}
ck.Put(key, string(value))
check(t, cka[i % nservers], key, string(value))
check(t, cka[i%nservers], key, string(value))
}
}
......@@ -178,7 +178,7 @@ func TestDone(t *testing.T) {
// fmt.Printf(" Memory: before %v, after %v\n", m0.Alloc, m1.Alloc)
allowed := m0.Alloc + uint64(nservers * items * sz * 2)
allowed := m0.Alloc + uint64(nservers*items*sz*2)
if m1.Alloc > allowed {
t.Fatalf("Memory use did not shrink enough (Used: %v, allowed: %v).\n", m1.Alloc, allowed)
}
......@@ -253,7 +253,7 @@ func TestPartition(t *testing.T) {
fmt.Printf("Test: No partition ...\n")
part(t, tag, nservers, []int{0,1,2,3,4}, []int{}, []int{})
part(t, tag, nservers, []int{0, 1, 2, 3, 4}, []int{}, []int{})
cka[0].Put("1", "12")
cka[2].Put("1", "13")
check(t, cka[3], "1", "13")
......@@ -262,7 +262,7 @@ func TestPartition(t *testing.T) {
fmt.Printf("Test: Progress in majority ...\n")
part(t, tag, nservers, []int{2,3,4}, []int{0,1}, []int{})
part(t, tag, nservers, []int{2, 3, 4}, []int{0, 1}, []int{})
cka[2].Put("1", "14")
check(t, cka[4], "1", "14")
......@@ -295,7 +295,7 @@ func TestPartition(t *testing.T) {
fmt.Printf("Test: Completion after heal ...\n")
part(t, tag, nservers, []int{0,2,3,4}, []int{1}, []int{})
part(t, tag, nservers, []int{0, 2, 3, 4}, []int{1}, []int{})
for iters := 0; iters < 30; iters++ {
if done0 {
break
......@@ -311,7 +311,7 @@ func TestPartition(t *testing.T) {
check(t, cka[4], "1", "15")
check(t, cka[0], "1", "15")
part(t, tag, nservers, []int{0,1,2}, []int{3,4}, []int{})
part(t, tag, nservers, []int{0, 1, 2}, []int{3, 4}, []int{})
for iters := 0; iters < 100; iters++ {
if done1 {
break
......@@ -374,7 +374,7 @@ func TestUnreliable(t *testing.T) {
sa := make([]string, len(kvh))
copy(sa, kvh)
for i := range sa {
j := rand.Intn(i+1)
j := rand.Intn(i + 1)
sa[i], sa[j] = sa[j], sa[i]
}
myck := MakeClerk(sa)
......@@ -406,7 +406,7 @@ func TestUnreliable(t *testing.T) {
}(cli)
}
for cli := 0; cli < ncli; cli++ {
x := <- ca[cli]
x := <-ca[cli]
if x == false {
t.Fatalf("failure")
}
......@@ -427,7 +427,7 @@ func TestUnreliable(t *testing.T) {
sa := make([]string, len(kvh))
copy(sa, kvh)
for i := range sa {
j := rand.Intn(i+1)
j := rand.Intn(i + 1)
sa[i], sa[j] = sa[j], sa[i]
}
myck := MakeClerk(sa)
......@@ -439,7 +439,7 @@ func TestUnreliable(t *testing.T) {
}(cli)
}
for cli := 0; cli < ncli; cli++ {
<- ca[cli]
<-ca[cli]
}
var va [nservers]string
......@@ -481,7 +481,7 @@ func TestHole(t *testing.T) {
defer part(t, tag, nservers, []int{}, []int{}, []int{})
for iters := 0; iters < 5; iters++ {
part(t, tag, nservers, []int{0,1,2,3,4}, []int{}, []int{})
part(t, tag, nservers, []int{0, 1, 2, 3, 4}, []int{}, []int{})
ck2 := MakeClerk([]string{port(tag, 2)})
ck2.Put("q", "q")
......@@ -516,12 +516,12 @@ func TestHole(t *testing.T) {
}
}
ok = true
} (xcli)
}(xcli)
}
time.Sleep(3 * time.Second)
part(t, tag, nservers, []int{2,3,4}, []int{0,1}, []int{})
part(t, tag, nservers, []int{2, 3, 4}, []int{0, 1}, []int{})
// can majority partition make progress even though
// minority servers were interrupted in the middle of
......@@ -531,11 +531,11 @@ func TestHole(t *testing.T) {
check(t, ck2, "q", "qq")
// restore network, wait for all threads to exit.
part(t, tag, nservers, []int{0,1,2,3,4}, []int{}, []int{})
part(t, tag, nservers, []int{0, 1, 2, 3, 4}, []int{}, []int{})
done = true
ok := true
for i := 0; i < nclients; i++ {
z := <- ca[i]
z := <-ca[i]
ok = ok && z
}
if ok == false {
......@@ -571,14 +571,14 @@ func TestManyPartition(t *testing.T) {
kva[i].unreliable = true
}
defer part(t, tag, nservers, []int{}, []int{}, []int{})
part(t, tag, nservers, []int{0,1,2,3,4}, []int{}, []int{})
part(t, tag, nservers, []int{0, 1, 2, 3, 4}, []int{}, []int{})
done := false
// re-partition periodically
ch1 := make(chan bool)
go func() {
defer func() { ch1 <- true } ()
defer func() { ch1 <- true }()
for done == false {
var a [nservers]int
for i := 0; i < nservers; i++ {
......@@ -594,7 +594,7 @@ func TestManyPartition(t *testing.T) {
}
}
part(t, tag, nservers, pa[0], pa[1], pa[2])
time.Sleep(time.Duration(rand.Int63() % 200) * time.Millisecond)
time.Sleep(time.Duration(rand.Int63()%200) * time.Millisecond)
}
}()
......@@ -610,7 +610,7 @@ func TestManyPartition(t *testing.T) {
sa[i] = port(tag, i)
}
for i := range sa {
j := rand.Intn(i+1)
j := rand.Intn(i + 1)
sa[i], sa[j] = sa[j], sa[i]
}
myck := MakeClerk(sa)
......@@ -635,17 +635,17 @@ func TestManyPartition(t *testing.T) {
}
}
ok = true
} (xcli)
}(xcli)
}
time.Sleep(20 * time.Second)
done = true
<- ch1
part(t, tag, nservers, []int{0,1,2,3,4}, []int{}, []int{})
<-ch1
part(t, tag, nservers, []int{0, 1, 2, 3, 4}, []int{}, []int{})
ok := true
for i := 0; i < nclients; i++ {
z := <- ca[i]
z := <-ca[i]
ok = ok && z
}
......
......@@ -12,7 +12,6 @@ type Clerk struct {
// Your definitions here.
}
func MakeClerk(primary string, backup string) *Clerk {
ck := new(Clerk)
ck.servers[0] = primary
......@@ -76,7 +75,6 @@ func (ck *Clerk) Lock(lockname string) bool {
return reply.OK
}
//
// ask the lock service to unlock a lock.
// returns true if the lock was previously held,
......
......@@ -22,7 +22,6 @@ type LockServer struct {
locks map[string]bool
}
//
// server Lock RPC handler.
//
......@@ -32,7 +31,6 @@ func (ls *LockServer) Lock(args *LockArgs, reply *LockReply) error {
ls.mu.Lock()
defer ls.mu.Unlock()
locked, _ := ls.locks[args.Lockname]
if locked {
......@@ -75,6 +73,7 @@ func (ls *LockServer) kill() {
type DeafConn struct {
c io.ReadWriteCloser
}
// Write implements io.Writer for DeafConn but discards all data:
// it reports full success (len(p), nil) without writing a single byte.
// This makes the connection "deaf" on the outbound side, so any RPC
// reply sent through it is silently dropped (used to simulate a server
// that processes a request but whose reply is lost).
func (dc DeafConn) Write(p []byte) (n int, err error) {
	return len(p), nil
}
......@@ -93,7 +92,6 @@ func StartServer(primary string, backup string, am_primary bool) *LockServer {
// Your initialization code here.
me := ""
if am_primary {
me = primary
......@@ -108,9 +106,9 @@ func StartServer(primary string, backup string, am_primary bool) *LockServer {
// prepare to receive connections from clients.
// change "unix" to "tcp" to use over a network.
os.Remove(me) // only needed for "unix"
l, e := net.Listen("unix", me);
l, e := net.Listen("unix", me)
if e != nil {
log.Fatal("listen error: ", e);
log.Fatal("listen error: ", e)
}
ls.l = l
......@@ -136,7 +134,7 @@ func StartServer(primary string, backup string, am_primary bool) *LockServer {
// this object has the type ServeConn expects,
// but discards writes (i.e. discards the RPC reply).
deaf_conn := DeafConn{c : conn}
deaf_conn := DeafConn{c: conn}
rpcs.ServeConn(deaf_conn)
......
......@@ -257,7 +257,7 @@ func TestPrimaryFail7(t *testing.T) {
time.Sleep(1 * time.Second)
tl(t, ck1, "b", true)
ok := <- ch
ok := <-ch
if ok == false {
t.Fatalf("re-sent Unlock did not return true")
}
......@@ -295,7 +295,7 @@ func TestPrimaryFail8(t *testing.T) {
time.Sleep(1 * time.Second)
tl(t, ck1, "a", true)
ok := <- ch
ok := <-ch
if ok == false {
t.Fatalf("re-sent Unlock did not return false")
}
......@@ -361,9 +361,9 @@ func TestMany(t *testing.T) {
var acks [nclients]bool
for xi := 0; xi < nclients; xi++ {
go func(i int){
go func(i int) {
ck := MakeClerk(phost, bhost)
rr := rand.New(rand.NewSource(int64(os.Getpid()+i)))
rr := rand.New(rand.NewSource(int64(os.Getpid() + i)))
for done == false {
locknum := (rr.Int() % nlocks)
lockname := strconv.Itoa(locknum + (i * 1000))
......@@ -392,7 +392,7 @@ func TestMany(t *testing.T) {
}
for locknum := 0; locknum < nlocks; locknum++ {
lockname := strconv.Itoa(locknum + (xi * 1000))
locked := ! ck.Lock(lockname)
locked := !ck.Lock(lockname)
if locked != state[xi][locknum] {
t.Fatal("bad final state")
}
......@@ -416,13 +416,13 @@ func TestConcurrentCounts(t *testing.T) {
const nlocks = 1
done := false
var acks [nclients]bool
var locks [nclients][nlocks] int
var unlocks [nclients][nlocks] int
var locks [nclients][nlocks]int
var unlocks [nclients][nlocks]int
for xi := 0; xi < nclients; xi++ {
go func(i int){
go func(i int) {
ck := MakeClerk(phost, bhost)
rr := rand.New(rand.NewSource(int64(os.Getpid()+i)))
rr := rand.New(rand.NewSource(int64(os.Getpid() + i)))
for done == false {
locknum := rr.Int() % nlocks
lockname := strconv.Itoa(locknum)
......@@ -462,7 +462,7 @@ func TestConcurrentCounts(t *testing.T) {
locked := ck.Unlock(strconv.Itoa(locknum))
// fmt.Printf("lock=%d nl=%d nu=%d locked=%v\n",
// locknum, nl, nu, locked)
if nl < nu || nl > nu + 1 {
if nl < nu || nl > nu+1 {
t.Fatal("lock race 1")
}
if nl == nu && locked != false {
......
......@@ -25,5 +25,7 @@ func main() {
fmt.Printf("Usage: lockd -p|-b primaryport backupport\n")
os.Exit(1)
}
for { time.Sleep(100 * time.Second) }
for {
time.Sleep(100 * time.Second)
}
}
......@@ -17,5 +17,7 @@ func main() {
pbservice.StartServer(os.Args[1], os.Args[2])
for { time.Sleep(100 * time.Second) }
for {
time.Sleep(100 * time.Second)
}
}
......@@ -17,5 +17,7 @@ func main() {
viewservice.StartServer(os.Args[1])
for { time.Sleep(100 * time.Second) }
for {
time.Sleep(100 * time.Second)
}
}
......@@ -132,7 +132,6 @@ func (mr *MapReduce) StartRegistrationServer() {
}()
}
// Name of the file that is the input for map job <MapJob>
func MapName(fileName string, MapJob int) string {
return "mrtmp." + fileName + "-" + strconv.Itoa(MapJob)
......@@ -141,22 +140,22 @@ func MapName(fileName string, MapJob int) string {
// Split bytes of input file into nMap splits, but split only on white space
func (mr *MapReduce) Split(fileName string) {
fmt.Printf("Split %s\n", fileName)
infile, err := os.Open(fileName);
infile, err := os.Open(fileName)
if err != nil {
log.Fatal("Split: ", err);
log.Fatal("Split: ", err)
}
defer infile.Close()
fi, err := infile.Stat();
fi, err := infile.Stat()
if err != nil {
log.Fatal("Split: ", err);
log.Fatal("Split: ", err)
}
size := fi.Size()
nchunk := size / int64(mr.nMap);
nchunk := size / int64(mr.nMap)
nchunk += 1
outfile, err := os.Create(MapName(fileName, 0))
if err != nil {
log.Fatal("Split: ", err);
log.Fatal("Split: ", err)
}
writer := bufio.NewWriter(outfile)
m := 1
......@@ -164,7 +163,7 @@ func (mr *MapReduce) Split(fileName string) {
scanner := bufio.NewScanner(infile)
for scanner.Scan() {
if (int64(i) > nchunk * int64(m)) {
if int64(i) > nchunk*int64(m) {
writer.Flush()
outfile.Close()
outfile, err = os.Create(MapName(fileName, m))
......@@ -196,18 +195,18 @@ func DoMap(JobNumber int, fileName string,
name := MapName(fileName, JobNumber)
file, err := os.Open(name)
if err != nil {
log.Fatal("DoMap: ", err);
log.Fatal("DoMap: ", err)
}
fi, err := file.Stat();
fi, err := file.Stat()
if err != nil {
log.Fatal("DoMap: ", err);
log.Fatal("DoMap: ", err)
}
size := fi.Size()
fmt.Printf("DoMap: read split %s %d\n", name, size)
b := make([]byte, size);
_, err = file.Read(b);
b := make([]byte, size)
_, err = file.Read(b)
if err != nil {
log.Fatal("DoMap: ", err);
log.Fatal("DoMap: ", err)
}
file.Close()
res := Map(string(b))
......@@ -215,15 +214,15 @@ func DoMap(JobNumber int, fileName string,
for r := 0; r < nreduce; r++ {
file, err = os.Create(ReduceName(fileName, JobNumber, r))
if err != nil {
log.Fatal("DoMap: create ", err);
log.Fatal("DoMap: create ", err)
}
enc := json.NewEncoder(file)
for e := res.Front(); e != nil; e = e.Next() {
kv := e.Value.(KeyValue)
if hash(kv.Key) % uint32(nreduce) == uint32(r) {
err := enc.Encode(&kv);
if hash(kv.Key)%uint32(nreduce) == uint32(r) {
err := enc.Encode(&kv)
if err != nil {
log.Fatal("DoMap: marshall ", err);
log.Fatal("DoMap: marshall ", err)
}
}
}
......@@ -238,21 +237,21 @@ func MergeName(fileName string, ReduceJob int) string {
// Read map outputs for partition job, sort them by key, call reduce for each
// key
func DoReduce(job int, fileName string, nmap int,
Reduce func(string,*list.List) string) {
Reduce func(string, *list.List) string) {
kvs := make(map[string]*list.List)
for i := 0; i < nmap; i++ {
name := ReduceName(fileName, i, job)
fmt.Printf("DoReduce: read %s\n", name)
file, err := os.Open(name)
if err != nil {
log.Fatal("DoReduce: ", err);
log.Fatal("DoReduce: ", err)
}
dec := json.NewDecoder(file)
for {
var kv KeyValue
err = dec.Decode(&kv);
err = dec.Decode(&kv)
if err != nil {
break;
break
}
_, ok := kvs[kv.Key]
if !ok {
......@@ -270,7 +269,7 @@ func DoReduce(job int, fileName string, nmap int,
p := MergeName(fileName, job)
file, err := os.Create(p)
if err != nil {
log.Fatal("DoReduce: create ", err);
log.Fatal("DoReduce: create ", err)
}
enc := json.NewEncoder(file)
for _, k := range keys {
......@@ -290,14 +289,14 @@ func (mr *MapReduce) Merge() {
fmt.Printf("Merge: read %s\n", p)
file, err := os.Open(p)
if err != nil {
log.Fatal("Merge: ", err);
log.Fatal("Merge: ", err)
}
dec := json.NewDecoder(file)
for {
var kv KeyValue
err = dec.Decode(&kv);
err = dec.Decode(&kv)
if err != nil {
break;
break
}
kvs[kv.Key] = kv.Value
}
......@@ -311,7 +310,7 @@ func (mr *MapReduce) Merge() {
file, err := os.Create("mrtmp." + mr.file)
if err != nil {
log.Fatal("Merge: create ", err);
log.Fatal("Merge: create ", err)
}
w := bufio.NewWriter(file)
for _, k := range keys {
......@@ -324,7 +323,7 @@ func (mr *MapReduce) Merge() {
func RemoveFile(n string) {
err := os.Remove(n)
if err != nil {
log.Fatal("CleanupFiles ", err);
log.Fatal("CleanupFiles ", err)
}
}
......@@ -344,7 +343,7 @@ func (mr *MapReduce) CleanupFiles() {
// Run jobs sequentially.
func RunSingle(nMap int, nReduce int, file string,
Map func(string) *list.List,
Reduce func(string,*list.List) string) {
Reduce func(string, *list.List) string) {
mr := InitMapReduce(nMap, nReduce, file, "")
mr.Split(mr.file)
for i := 0; i < nMap; i++ {
......@@ -363,7 +362,7 @@ func (mr *MapReduce) CleanupRegistration() {
if ok == false {
fmt.Printf("Cleanup: RPC %s error\n", mr.MasterAddress)
}
DPrintf("CleanupRegistration: done\n");
DPrintf("CleanupRegistration: done\n")
}
// Run jobs in parallel, assuming a shared file system
......