// -*- mode: c++; c-file-style: "k&r"; c-basic-offset: 4 -*-
/***********************************************************************
*
* lockserver/server.cc:
* A lockserver replica.
*
* Copyright 2015 Naveen Kr. Sharma <naveenks@cs.washington.edu>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************************/
#include "lockserver/server.h"
#include <algorithm>
#include <iterator>
#include <set>
#include <unordered_set>
namespace lockserver {
using namespace proto;
LockServer::LockServer() { }
LockServer::~LockServer() { }
void
LockServer::ExecInconsistentUpcall(const string &str1)
{
Debug("ExecInconsistent: %s\n", str1.c_str());
Request request;
request.ParseFromString(str1);
string key = request.key();
uint64_t client_id = request.clientid();
if (request.type()) { // Lock operation.
Warning("Lock operation being sent as Inconsistent. Ignored.");
} else {
if (locks.find(key) != locks.end()) {
if (client_id == locks[key]) {
Debug("Releasing lock %lu: %s", client_id, key.c_str());
locks.erase(key);
} else {
Debug("Lock held by someone else %lu: %s, %lu",
client_id, key.c_str(), locks[key]);
}
} else {
Debug("Lock held by no one.");
}
}
}
void
LockServer::ExecConsensusUpcall(const string &str1, string &str2)
{
Debug("ExecConsensus: %s\n", str1.c_str());
Request request;
Reply reply;
request.ParseFromString(str1);
string key = request.key();
uint64_t client_id = request.clientid();
reply.set_key(key);
int status = 0;
if (request.type()) { // Lock operation.
if (locks.find(key) == locks.end()) {
Debug("Assigning lock %lu: %s", client_id, key.c_str());
locks[key] = client_id;
} else if (locks[key] != client_id) {
Debug("Lock already held %lu: %s", client_id, key.c_str());
status = -1;
}
} else {
Warning("Unlock operation being sent as Consensus");
if (locks.find(key) == locks.end()) {
Debug("Lock held by no-one %lu: %s", client_id, key.c_str());
status = -2;
} else if (locks[key] != client_id) {
Debug("Lock held by someone else %lu: %s, %lu",
client_id, key.c_str(), locks[key]);
status = -2;
} else {
Debug("Releasing lock %lu: %s", client_id, key.c_str());
locks.erase(key);
}
}
reply.set_status(status);
reply.SerializeToString(&str2);
}
void
LockServer::UnloggedUpcall(const string &str1, string &str2)
{
Debug("Unlogged: %s\n", str1.c_str());
}
// Rebuild the lock table from a synced record: for each key, the lock is
// assigned to the (at most one) client that has a successful Lock in the
// record and no matching successful Unlock.
void
LockServer::Sync(const std::map<opid_t, RecordEntry>& record) {
locks.clear();
// std::set_difference below requires sorted input ranges, so these must be
// ordered sets rather than unordered sets.
struct KeyLockInfo {
std::set<uint64_t> locked;
std::set<uint64_t> unlocked;
};
std::unordered_map<std::string, KeyLockInfo> key_lock_info;
for (const std::pair<const opid_t, RecordEntry> &p : record) {
const opid_t &opid = p.first;
const RecordEntry &entry = p.second;
Request request;
request.ParseFromString(entry.request.op());
Reply reply;
reply.ParseFromString(entry.result);
KeyLockInfo &info = key_lock_info[request.key()];
Debug("Sync opid=(%lu, %lu), clientid=%lu, key=%s, type=%d, status=%d.",
opid.first, opid.second, request.clientid(),
request.key().c_str(), request.type(), reply.status());
if (request.type() && reply.status() == 0) {
// Lock.
info.locked.insert(request.clientid());
} else if (!request.type() && reply.status() == 0) {
// Unlock.
info.unlocked.insert(request.clientid());
}
}
for (const std::pair<const std::string, KeyLockInfo> &p : key_lock_info) {
const std::string &key = p.first;
const KeyLockInfo &info = p.second;
std::unordered_set<uint64_t> diff;
std::set_difference(std::begin(info.locked), std::end(info.locked),
std::begin(info.unlocked), std::end(info.unlocked),
std::inserter(diff, diff.begin()));
ASSERT(diff.size() == 0 || diff.size() == 1);
if (diff.size() == 1) {
uint64_t client_id = *std::begin(diff);
Debug("Assigning lock %lu: %s", client_id, key.c_str());
locks[key] = client_id;
}
}
}
std::map<opid_t, std::string>
LockServer::Merge(const std::map<opid_t, std::vector<RecordEntry>> &d,
const std::map<opid_t, std::vector<RecordEntry>> &u,
const std::map<opid_t, std::string> &majority_results_in_d) {
// First note that d and u only contain consensus operations, and lock
// requests are the only consensus operations (unlock is an inconsistent
// operation), so d and u only contain lock requests. To merge, we grant
// any majority successful lock request in d if it does not conflict with a
// currently held lock. We do not grant any other lock request.
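//
// A concrete (illustrative) case: suppose d holds a majority-successful
// lock on key "a" from client 1. If the synced state does not already
// assign "a", the request is granted and the majority result is kept; if
// "a" is already held (say by client 2 after Sync), the reply is rewritten
// with status -1. Every request in u is likewise answered with status -1.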
std::map<opid_t, std::string> results;
using EntryVec = std::vector<RecordEntry>;
for (const std::pair<const opid_t, EntryVec>& p: d) {
const opid_t &opid = p.first;
const EntryVec &entries = p.second;
// Get the request and reply.
const RecordEntry &entry = *std::begin(entries);
Request request;
request.ParseFromString(entry.request.op());
Reply reply;
auto iter = majority_results_in_d.find(opid);
ASSERT(iter != std::end(majority_results_in_d));
reply.ParseFromString(iter->second);
// Form the final result.
const bool operation_successful = reply.status() == 0;
if (operation_successful) {
// If the lock was successful, then we acquire the lock so long as
// it is not already held.
const std::string &key = reply.key();
if (locks.count(key) == 0) {
Debug("Assigning lock %lu: %s", request.clientid(),
key.c_str());
locks[key] = request.clientid();
results[opid] = iter->second;
} else {
Debug("Rejecting lock %lu: %s", request.clientid(),
key.c_str());
reply.set_status(-1);
std::string s;
reply.SerializeToString(&s);
results[opid] = s;
}
} else {
// If the lock was not successful, then we maintain this as the
// majority result.
results[opid] = iter->second;
}
}
// We reject all lock requests in u. TODO: We could acquire a lock if
// it is free, but it's simplest to just reject them unilaterally.
for (const std::pair<const opid_t, EntryVec>& p: u) {
const opid_t &opid = p.first;
const EntryVec &entries = p.second;
const RecordEntry &entry = *std::begin(entries);
Request request;
request.ParseFromString(entry.request.op());
Debug("Rejecting lock %lu: %s", request.clientid(),
request.key().c_str());
Reply reply;
reply.set_key(request.key());
reply.set_status(-1);
std::string s;
reply.SerializeToString(&s);
results[opid] = s;
}
return results;
}
} // namespace lockserver
// -*- mode: c++; c-file-style: "k&r"; c-basic-offset: 4 -*-
/***********************************************************************
*
* lockserver/server.h:
* A lockserver replica interface.
*
* Copyright 2015 Naveen Kr. Sharma <naveenks@cs.washington.edu>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************************/
#ifndef _IR_LOCK_SERVER_H_
#define _IR_LOCK_SERVER_H_
#include <string>
#include <unordered_map>
#include "lib/transport.h"
#include "replication/ir/replica.h"
#include "lockserver/locks-proto.pb.h"
namespace lockserver {
using opid_t = replication::ir::opid_t;
using RecordEntry = replication::ir::RecordEntry;
class LockServer : public replication::ir::IRAppReplica
{
public:
LockServer();
~LockServer();
// Invoke inconsistent operation, no return value
void ExecInconsistentUpcall(const string &str1) override;
// Invoke consensus operation
void ExecConsensusUpcall(const string &str1, string &str2) override;
// Invoke unreplicated operation
void UnloggedUpcall(const string &str1, string &str2) override;
// Sync
void Sync(const std::map<opid_t, RecordEntry>& record) override;
// Merge
std::map<opid_t, std::string> Merge(
const std::map<opid_t, std::vector<RecordEntry>> &d,
const std::map<opid_t, std::vector<RecordEntry>> &u,
const std::map<opid_t, std::string> &majority_results_in_d) override;
private:
std::unordered_map<std::string, uint64_t> locks;
};
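// Usage sketch (mirroring the fixture in lockserver-test.cc; the variable
// names here are illustrative): a LockServer is handed to an IR replica,
// which then drives the upcalls declared above.
//
//   lockserver::LockServer app;
//   replication::ir::IRReplica replica(config, index, &transport, &app);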
} // namespace lockserver
#endif /* _IR_LOCK_SERVER_H_ */
d := $(dir $(lastword $(MAKEFILE_LIST)))
GTEST_SRCS += $(addprefix $(d), lockserver-test.cc)
$(d)lockserver-test: $(o)lockserver-test.o \
$(o)../locks-proto.o \
$(o)../server.o \
$(o)../client.o \
$(OBJS-ir-replica) \
$(OBJS-ir-client) \
$(LIB-configuration) \
$(LIB-repltransport) \
$(LIB-store-common) \
$(GTEST_MAIN)
TEST_BINS += $(d)lockserver-test
// -*- mode: c++; c-file-style: "k&r"; c-basic-offset: 4 -*-
/***********************************************************************
*
* lockserver_test.cc:
* test cases for lock server
*
* Copyright 2013 Dan R. K. Ports <drkp@cs.washington.edu>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************************/
#include <fstream>
#include <memory>
#include <thread>
#include <gtest/gtest.h>
#include "lib/configuration.h"
#include "lib/repltransport.h"
#include "lockserver/client.h"
#include "lockserver/server.h"
#include "replication/ir/replica.h"
class LockServerTest : public testing::Test {
protected:
std::vector<transport::ReplicaAddress> replica_addrs_;
std::unique_ptr<transport::Configuration> config_;
ReplTransport transport_;
std::vector<std::unique_ptr<lockserver::LockClient>> clients_;
std::vector<std::unique_ptr<lockserver::LockServer>> servers_;
std::vector<std::unique_ptr<replication::ir::IRReplica>> replicas_;
LockServerTest() {
replica_addrs_ = {{"replica", "0"},
{"replica", "1"},
{"replica", "2"},
{"replica", "3"},
{"replica", "4"}};
config_ = std::unique_ptr<transport::Configuration>(
new transport::Configuration(5, 2, replica_addrs_));
RemovePersistedFiles();
for (std::size_t i = 0; i < 3; ++i) {
auto client = std::unique_ptr<lockserver::LockClient>(
new lockserver::LockClient(&transport_, *config_));
client->lock_async(std::to_string(i));
clients_.push_back(std::move(client));
}
for (std::size_t i = 0; i < replica_addrs_.size(); ++i) {
auto server = std::unique_ptr<lockserver::LockServer>(
new lockserver::LockServer());
servers_.push_back(std::move(server));
auto replica = std::unique_ptr<replication::ir::IRReplica>(
new replication::ir::IRReplica(*config_, i, &transport_,
servers_[i].get()));
replicas_.push_back(std::move(replica));
}
}
virtual void TearDown() {
RemovePersistedFiles();
}
virtual void RemovePersistedFiles() {
for (std::size_t i = 0; i < replica_addrs_.size(); ++i) {
const transport::ReplicaAddress &addr = replica_addrs_[i];
const std::string filename =
addr.host + ":" + addr.port + "_" + std::to_string(i) + ".bin";
std::ifstream f(filename);
if (f.good()) {
int success = std::remove(filename.c_str());
ASSERT(success == 0);
}
}
}
};
// Note that these tests are all white box smoke tests. They depend on the
// low-level details of knowing exactly which timeouts are registered and which
// messages are sent. If an implementation detail is changed to make some of
// these tests fail, you should call transport_.Run() and walk through the
// execution to trigger the desired behavior. Also, they only check to make
// sure that nothing crashes, though you can read through the Debug prints to
// make sure everything looks right.
//
// TODO: Use a ReplTransport for tests like the ones in ir-test.cc to assert
// that the correct messages are being sent.
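//
// A minimal sketch of that debugging approach (assuming transport_.Run()
// simply drains pending timers and messages, as the comment above suggests):
//
//   TEST_F(LockServerTest, WalkThroughExecution) {
//       transport_.TriggerTimer(1); // kick off client 0's lock request
//       transport_.Run();           // then follow the Debug output
//   }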
TEST_F(LockServerTest, SuccessfulFastPathLock) {
// Send client 0's lock request.
transport_.TriggerTimer(1);
// Deliver lock request to replicas.
for (const auto &addr : replica_addrs_) {
transport_.DeliverMessage({addr.host, addr.port}, 0);
}
// Deliver lock reply to client.
for (std::size_t i = 0; i < replica_addrs_.size(); ++i) {
transport_.DeliverMessage({"client", "0"}, i);
}
// Deliver finalize to replicas.
for (const auto &addr : replica_addrs_) {
transport_.DeliverMessage({addr.host, addr.port}, 1);
}
// Deliver confirm to client.
int j = replica_addrs_.size();
for (std::size_t i = j; i < j + replica_addrs_.size(); ++i) {
transport_.DeliverMessage({"client", "0"}, i);
}
}
TEST_F(LockServerTest, SuccessfulSlowPathLock) {
// Send client 0's lock request.
transport_.TriggerTimer(1);
// Transition to slow path.
transport_.TriggerTimer(clients_.size() + replica_addrs_.size() + 1);
// Deliver lock request to replicas.
for (const auto &addr : replica_addrs_) {
transport_.DeliverMessage({addr.host, addr.port}, 0);
}
// Deliver lock reply to client.
for (std::size_t i = 0; i < replica_addrs_.size(); ++i) {
transport_.DeliverMessage({"client", "0"}, i);
}
// Deliver finalize to replicas.
for (const auto &addr : replica_addrs_) {
transport_.DeliverMessage({addr.host, addr.port}, 1);
}
// Deliver confirm to client.
int j = replica_addrs_.size();
for (std::size_t i = j; i < j + replica_addrs_.size(); ++i) {
transport_.DeliverMessage({"client", "0"}, i);
}
}
TEST_F(LockServerTest, SuccessfulViewChange) {
// Send client 0's lock request.
transport_.TriggerTimer(1);
// Deliver lock request to replicas.
for (const auto &addr : replica_addrs_) {
transport_.DeliverMessage({addr.host, addr.port}, 0);
}
// Initiate view changes on all replicas.
const std::size_t nclients = clients_.size();
const std::size_t nreplicas = replica_addrs_.size();
for (std::size_t i = nclients + 1; i < nclients + nreplicas + 1; ++i) {
transport_.TriggerTimer(i);
}
// Deliver DoViewChangeMessages to new primary.
const transport::ReplicaAddress& primary = replica_addrs_[1];
for (std::size_t i = 1; i < 1 + nreplicas - 1; ++i) {
transport_.DeliverMessage({primary.host, primary.port}, i);
}
// Deliver StartViewMessage to all replicas.
for (std::size_t i = 0; i < nreplicas; ++i) {
if (i == 1) {
continue;
}
const transport::ReplicaAddress& addr = replica_addrs_[i];
transport_.DeliverMessage({addr.host, addr.port}, nreplicas);
}
}
TEST_F(LockServerTest, SuccessfulViewChangeNonemptyRdu) {
const std::size_t nclients = clients_.size();
const std::size_t nreplicas = replica_addrs_.size();
ASSERT_GE(nclients, 3);
ASSERT_GE(nreplicas, 3);
// Send client 0's lock request.
transport_.TriggerTimer(1);
// Deliver lock request to replicas.
for (const auto &addr : replica_addrs_) {
transport_.DeliverMessage({addr.host, addr.port}, 0);
}
// Deliver lock reply to client.
for (std::size_t i = 0; i < replica_addrs_.size(); ++i) {
transport_.DeliverMessage({"client", "0"}, i);
}
// Deliver finalize to replicas.
for (const auto &addr : replica_addrs_) {
transport_.DeliverMessage({addr.host, addr.port}, 1);
}
// Send client 1's lock request.
transport_.TriggerTimer(2);
// Deliver lock request to first three replicas.
for (std::size_t i = 0; i < 3; ++i) {
const transport::ReplicaAddress &addr = replica_addrs_[i];
transport_.DeliverMessage({addr.host, addr.port}, 2);
}
// Send client 2's lock request.
transport_.TriggerTimer(3);
// Deliver lock request to first replica.
const transport::ReplicaAddress &addr = replica_addrs_[0];
transport_.DeliverMessage({addr.host, addr.port}, 3);
// View change first three replicas.
for (std::size_t i = nclients + 1; i < nclients + 1 + 3; ++i) {
transport_.TriggerTimer(i);
}
// Deliver DoViewChangeMessages to new primary.
const transport::ReplicaAddress& primary = replica_addrs_[1];
for (std::size_t i = 4; i < 4 + 2; ++i) {
transport_.DeliverMessage({primary.host, primary.port}, i);
}
// Deliver StartViewMessage to replica 0 and 2.
const transport::ReplicaAddress& addr0 = replica_addrs_[0];
const transport::ReplicaAddress& addr2 = replica_addrs_[2];
transport_.DeliverMessage({addr0.host, addr0.port}, 6);
transport_.DeliverMessage({addr2.host, addr2.port}, 6);
}
TEST_F(LockServerTest, FinalizeConsensusReply) {
const std::size_t nclients = clients_.size();
const std::size_t nreplicas = replica_addrs_.size();
// Send client 0's lock request.
transport_.TriggerTimer(1);
// Deliver lock request to replicas.
for (const auto &addr : replica_addrs_) {
transport_.DeliverMessage({addr.host, addr.port}, 0);
}
// Trigger view change.
for (std::size_t i = nclients + 1; i < nclients + 1 + nreplicas; ++i) {
transport_.TriggerTimer(i);
}
// Deliver DoViewChangeMessages to new primary.
const transport::ReplicaAddress& primary = replica_addrs_[1];
for (std::size_t i = 1; i < 1 + nreplicas - 1; ++i) {
transport_.DeliverMessage({primary.host, primary.port}, i);
}
// Deliver StartViewMessage to all replicas.
for (std::size_t i = 0; i < nreplicas; ++i) {
if (i == 1) {
continue;
}
const transport::ReplicaAddress& addr = replica_addrs_[i];
transport_.DeliverMessage({addr.host, addr.port}, nreplicas);
}
// Deliver lock request to replicas.
for (const auto &addr : replica_addrs_) {
transport_.DeliverMessage({addr.host, addr.port}, 0);
}
// Deliver finalized reply to client.
transport_.DeliverMessage({"client", "0"}, nreplicas);
}
TEST_F(LockServerTest, MismatchedConsensus) {
const std::size_t nclients = clients_.size();
const std::size_t nreplicas = replica_addrs_.size();
// Send client 0's lock request.
transport_.TriggerTimer(1);
// Transition to slow path.
transport_.TriggerTimer(nclients + nreplicas + 1);
// Deliver lock request to replicas.
for (const auto &addr : replica_addrs_) {
transport_.DeliverMessage({addr.host, addr.port}, 0);
}
// Deliver lock reply to client.
for (std::size_t i = 0; i < replica_addrs_.size(); ++i) {
transport_.DeliverMessage({"client", "0"}, i);
}
// Trigger view change.
for (std::size_t i = nclients + 1; i < nclients + 1 + nreplicas; ++i) {
transport_.TriggerTimer(i);
}
// Deliver DoViewChangeMessages to new primary.
const transport::ReplicaAddress& primary = replica_addrs_[1];
for (std::size_t i = 2; i < 2 + nreplicas - 1; ++i) {
transport_.DeliverMessage({primary.host, primary.port}, i);
}
// Deliver StartViewMessage to all replicas.
for (std::size_t i = 0; i < nreplicas; ++i) {
if (i == 1) {
continue;
}
const transport::ReplicaAddress& addr = replica_addrs_[i];
transport_.DeliverMessage({addr.host, addr.port}, 2 + nreplicas - 1);
}
// Deliver FinalizeConsensusMessage to all replicas.
for (const auto &addr : replica_addrs_) {
transport_.DeliverMessage({addr.host, addr.port}, 1);
}
// Deliver ConfirmMessages to client 0.
for (std::size_t i = nreplicas; i < nreplicas + nreplicas; ++i) {
transport_.DeliverMessage({"client", "0"}, i);
}
}
--------------------------------- MODULE Test ---------------------------------
(***************************************************************************)
(* This module exercises the recovered-consensus-operations definition     *)
(* from the TLA+ specification of the Inconsistent Replication algorithm.  *)
(***************************************************************************)
EXTENDS Naturals, FiniteSets, TLC
VARIABLES rViewReplies, recoveredOps
OpType == {"Inconsistent", "Consensus"}
OpStatus == {"TENTATIVE", "FINALIZED"}
Operations == [type: OpType, body: Nat]
TypeOK ==
/\ rViewReplies \in SUBSET ([lv: Nat,
r: SUBSET ([msgid: Nat,
op: Operations,
res: Nat,
status: OpStatus]
\cup [msgid: Nat,
op: Operations,
status: OpStatus]),
src: Nat])
A ==
rViewReplies
B ==
\* set of all records received in replies in A
UNION {x.r: x \in A}
test_recoveredConensusOps_R ==
\* any finalized consensus operation that appears in at least one record,
\* taken from the reply with the latest view
{x \in B:
/\ x.op.type = "Consensus"
/\ x.status = "FINALIZED"
/\ LET most_updated_reply ==
CHOOSE reply \in A:
/\ \E rec \in reply.r: /\ rec.msgid = x.msgid
/\ rec.status = "FINALIZED"
/\ \A rep \in A:
IF \E rec \in rep.r: /\ rec.msgid = x.msgid
/\ rec.status = "FINALIZED"
THEN rep.lv <= reply.lv
ELSE TRUE
IN
x \in most_updated_reply.r}
Init ==
/\ rViewReplies = {[lv |-> 1, r |-> {[msgid |-> 1, op |-> [type |-> "Consensus", body |-> 1], res |-> 1, status |-> "FINALIZED"]}, src |-> 1],
[lv |-> 2, r |-> {[msgid |-> 1, op |-> [type |-> "Consensus", body |-> 1], res |-> 2, status |-> "FINALIZED"]}, src |-> 2],
[lv |-> 3, r |-> {[msgid |-> 1, op |-> [type |-> "Consensus", body |-> 1], res |-> 3, status |-> "FINALIZED"]}, src |-> 3]}
/\ recoveredOps = {}
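\* With the Init above, all three replies finalize msgid 1, so
\* test_recoveredConensusOps_R keeps only the record from the latest-view
\* reply (lv = 3, res = 3). Once recoveredOps holds that record, the Assert
\* below is violated, which is exactly what the "Should fail" message signals.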
Next ==
/\ recoveredOps' = test_recoveredConensusOps_R
/\ Assert(Cardinality(recoveredOps) = 0, "Should fail")
/\ UNCHANGED <<rViewReplies>>
=============================================================================
\* Modification History
\* Last modified Fri Apr 24 14:34:42 PDT 2015 by aaasz
\* Created Fri Dec 12 17:42:14 PST 2014 by aaasz
Quorums <- {{1,2}, {1,3}, {2,3}}
Clients <- {1}
max_req <- 1
f <- 1
max_vc <- 3
Operations <- {1}
Replicas <- {1,2,3}
max_c <- 3