diff --git a/accumulo/pom.xml b/accumulo/pom.xml
index 1094be274f6a2f35ae7066f907d29a2f2a0d0070..08da0af6908b55296a6d3653607d49aad40d09f9 100644
--- a/accumulo/pom.xml
+++ b/accumulo/pom.xml
@@ -22,7 +22,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   <artifactId>accumulo-binding</artifactId>
diff --git a/aerospike/pom.xml b/aerospike/pom.xml
index 23cc3bcc78a926f3887cd37702b791818247205e..97a3213e74b433778aa0de2a607be3ee62178fa2 100644
--- a/aerospike/pom.xml
+++ b/aerospike/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
diff --git a/bin/ycsb b/bin/ycsb
index 16c5a82a639e1e4068e5eaa52b63bb1a87cbfc78..ef2c18598f99926a3131c104c155bb4bdb5be9bf 100755
--- a/bin/ycsb
+++ b/bin/ycsb
@@ -58,9 +58,11 @@ DATABASES = {
     "cassandra-cql": "com.yahoo.ycsb.db.CassandraCQLClient",
     "cassandra2-cql": "com.yahoo.ycsb.db.CassandraCQLClient",
     "couchbase"    : "com.yahoo.ycsb.db.CouchbaseClient",
+    "couchbase2"   : "com.yahoo.ycsb.db.couchbase2.Couchbase2Client",
     "dynamodb"     : "com.yahoo.ycsb.db.DynamoDBClient",
     "elasticsearch": "com.yahoo.ycsb.db.ElasticsearchClient",
     "geode"        : "com.yahoo.ycsb.db.GeodeClient",
+    "googlebigtable"  : "com.yahoo.ycsb.db.GoogleBigtableClient",
     "googledatastore" : "com.yahoo.ycsb.db.GoogleDatastoreClient",
     "hbase094"     : "com.yahoo.ycsb.db.HBaseClient",
     "hbase098"     : "com.yahoo.ycsb.db.HBaseClient",
diff --git a/binding-parent/datastore-specific-descriptor/pom.xml b/binding-parent/datastore-specific-descriptor/pom.xml
index 1c0a31ed7ff55147e5ecd27b23ba8e2ba898521a..840242e7f5f71b824213e14e3974a0edd86bc31e 100644
--- a/binding-parent/datastore-specific-descriptor/pom.xml
+++ b/binding-parent/datastore-specific-descriptor/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>root</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../../</relativePath>
   </parent>
 
diff --git a/binding-parent/pom.xml b/binding-parent/pom.xml
index fbca4de167b3e5ad79183d27529fbfa9b6751802..0416208b2b9846e22d3a43117c8c64c2e44c5b56 100644
--- a/binding-parent/pom.xml
+++ b/binding-parent/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>root</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
   </parent>
 
   <artifactId>binding-parent</artifactId>
diff --git a/cassandra/pom.xml b/cassandra/pom.xml
index 4b3cbae5a92b85cc55c671aa7a49e799473acc51..274ed5264506b69366abbb75180fc7c3b40265f3 100644
--- a/cassandra/pom.xml
+++ b/cassandra/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
      <groupId>com.yahoo.ycsb</groupId>
      <artifactId>binding-parent</artifactId>
-     <version>0.8.0-SNAPSHOT</version>
+     <version>0.9.0-SNAPSHOT</version>
      <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient10.java b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient10.java
index 9eaf2eeb5ef35867a170358af536d3bc8469d721..b3d8e4a7d18824f13ed28a5dff051c69eb5e8ac4 100644
--- a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient10.java
+++ b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient10.java
@@ -453,6 +453,10 @@ public class CassandraClient10 extends DB {
     }
 
     for (int i = 0; i < operationRetries; i++) {
+      mutations.clear();
+      mutationMap.clear();
+      record.clear();
+
       if (debug) {
         System.out.println("Inserting key: " + key);
       }
@@ -479,10 +483,6 @@ public class CassandraClient10 extends DB {
 
         client.batch_mutate(record, writeConsistencyLevel);
 
-        mutations.clear();
-        mutationMap.clear();
-        record.clear();
-
         if (debug) {
           System.out
               .println("ConsistencyLevel=" + writeConsistencyLevel.toString());
diff --git a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient7.java b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient7.java
index 8f63554953f0574d5f7f8f9c467e0dd847155994..f2075e7f77780a8a609d03af84aa127ad7d5419b 100644
--- a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient7.java
+++ b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient7.java
@@ -418,6 +418,10 @@ public class CassandraClient7 extends DB {
     }
 
     for (int i = 0; i < operationRetries; i++) {
+      mutations.clear();
+      mutationMap.clear();
+      record.clear();
+
       if (debug) {
         System.out.println("Inserting key: " + key);
       }
@@ -444,10 +448,6 @@ public class CassandraClient7 extends DB {
 
         client.batch_mutate(record, ConsistencyLevel.ONE);
 
-        mutations.clear();
-        mutationMap.clear();
-        record.clear();
-
         return Status.OK;
       } catch (Exception e) {
         errorexception = e;
diff --git a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient8.java b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient8.java
index ca6b0cf79bfce1a21771780c9435740b1295e924..ca72c339da797073df4da5e0e9d1af42c612a360 100644
--- a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient8.java
+++ b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient8.java
@@ -397,6 +397,10 @@ public class CassandraClient8 extends DB {
     }
 
     for (int i = 0; i < operationRetries; i++) {
+      mutations.clear();
+      mutationMap.clear();
+      record.clear();
+
       if (debug) {
         System.out.println("Inserting key: " + key);
       }
@@ -423,10 +427,6 @@ public class CassandraClient8 extends DB {
 
         client.batch_mutate(record, ConsistencyLevel.ONE);
 
-        mutations.clear();
-        mutationMap.clear();
-        record.clear();
-
         return Status.OK;
       } catch (Exception e) {
         errorexception = e;
diff --git a/cassandra2/pom.xml b/cassandra2/pom.xml
index f0f19edecc5a1aed5e69a46d7f4cb1a8c655e752..1a4b0ab6eb7fcddb87a3c29880c61b8665d77b04 100644
--- a/cassandra2/pom.xml
+++ b/cassandra2/pom.xml
@@ -23,7 +23,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
@@ -31,6 +31,11 @@ LICENSE file.
   <name>Cassandra 2.1+ DB Binding</name>
   <packaging>jar</packaging>
 
+  <properties>
+    <!-- Skip tests by default; they will be activated by the jdk8-tests profile below. -->
+    <skipTests>true</skipTests>
+  </properties>
+
   <dependencies>
     <!-- CQL driver -->
     <dependency>
@@ -46,8 +51,9 @@ LICENSE file.
     </dependency>
     <dependency>
       <groupId>org.cassandraunit</groupId>
-      <artifactId>cassandra-unit-shaded</artifactId>
-      <version>2.1.9.2</version>
+      <artifactId>cassandra-unit</artifactId>
+      <version>3.0.0.1</version>
+      <classifier>shaded</classifier>
       <scope>test</scope>
     </dependency>
     <dependency>
@@ -57,4 +63,19 @@ LICENSE file.
       <scope>test</scope>
     </dependency>
   </dependencies>
+
+  <profiles>
+    <!-- Cassandra 2.2+ requires JDK8 to run, so none of our tests
+         will work unless we're using jdk8.
+      -->
+    <profile>
+      <id>jdk8-tests</id>
+      <activation>
+        <jdk>1.8</jdk>
+      </activation>
+      <properties>
+        <skipTests>false</skipTests>
+      </properties>
+    </profile>
+  </profiles>
 </project>
diff --git a/cassandra2/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java b/cassandra2/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java
index bc73a73710506b957e39ce80ab730da908ea6654..60b7e2f33f392d68265f89352b17d62fc261e297 100644
--- a/cassandra2/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java
+++ b/cassandra2/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java
@@ -63,11 +63,12 @@ public class CassandraCQLClientTest {
   private Session session;
 
   @ClassRule
-  public static CassandraCQLUnit cassandraUnit =
-      new CassandraCQLUnit(new ClassPathCQLDataSet("ycsb.cql", "ycsb"));
+  public static CassandraCQLUnit cassandraUnit = new CassandraCQLUnit(new ClassPathCQLDataSet("ycsb.cql", "ycsb"));
 
   @Before
-  public void setUpClient() throws Exception {
+  public void setUp() throws Exception {
+    session = cassandraUnit.getSession();
+
     Properties p = new Properties();
     p.setProperty("hosts", HOST);
     p.setProperty("port", Integer.toString(PORT));
@@ -81,14 +82,11 @@ public class CassandraCQLClientTest {
     client.init();
   }
 
-  @Before
-  public void setSession() {
-    session = cassandraUnit.getSession();
-  }
-
   @After
   public void tearDownClient() throws Exception {
-    client.cleanup();
+    if (client != null) {
+      client.cleanup();
+    }
     client = null;
   }
 
@@ -96,7 +94,9 @@ public class CassandraCQLClientTest {
   public void clearTable() throws Exception {
     // Clear the table so that each test starts fresh.
     final Statement truncate = QueryBuilder.truncate(TABLE);
-    cassandraUnit.getSession().execute(truncate);
+    if (cassandraUnit != null) {
+      cassandraUnit.getSession().execute(truncate);
+    }
   }
 
   @Test
diff --git a/core/pom.xml b/core/pom.xml
index e914c5c04b996deee017a0dc151cdf13e44f53e6..3300cf485f3d63e505e349350605830d6c12c8b1 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>root</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
   </parent>
   
   <artifactId>core</artifactId>
diff --git a/couchbase/pom.xml b/couchbase/pom.xml
index e98566d7ef4f772c7bfef5b8ad00209720391314..5f4780aa96eb68689befd4ed028d454470cab4b4 100644
--- a/couchbase/pom.xml
+++ b/couchbase/pom.xml
@@ -22,7 +22,7 @@ LICENSE file.
     <parent>
         <groupId>com.yahoo.ycsb</groupId>
         <artifactId>binding-parent</artifactId>
-        <version>0.8.0-SNAPSHOT</version>
+        <version>0.9.0-SNAPSHOT</version>
         <relativePath>../binding-parent</relativePath>
     </parent>
 
diff --git a/couchbase2/README.md b/couchbase2/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..455a4eea02205479b0cc255a82c808e063906ee9
--- /dev/null
+++ b/couchbase2/README.md
@@ -0,0 +1,115 @@
+<!--
+Copyright (c) 2015 - 2016 YCSB contributors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file except in compliance with the License. You
+may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License. See accompanying
+LICENSE file.
+-->
+
+# Couchbase (SDK 2.x) Driver for YCSB
+This driver is a binding for the YCSB facilities to operate against a Couchbase Server cluster. It uses the official
+Couchbase Java SDK (version 2.x) and provides a rich set of configuration options, including support for the N1QL
+query language.
+
+## Quickstart
+
+### 1. Start Couchbase Server
+You need to start a single node or a cluster to point the client at. Please see [couchbase.com](http://couchbase.com)
+for more details and instructions.
+
+### 2. Set up YCSB
+You can either download the release zip and run it, or just clone from master.
+
+```
+git clone git://github.com/brianfrankcooper/YCSB.git
+cd YCSB
+mvn clean package
+```
+
+### 3. Run the Workload
+Before you can actually run the workload, you need to "load" the data first.
+
+```
+bin/ycsb load couchbase2 -s -P workloads/workloada
+```
+
+Then, you can run the workload:
+
+```
+bin/ycsb run couchbase2 -s -P workloads/workloada
+```
+
+Please see the general instructions in the `doc` folder if you are not sure how it all works. You can apply a property
+(as seen in the next section) like this:
+
+```
+bin/ycsb run couchbase2 -s -P workloads/workloada -p couchbase.epoll=true
+```
+
+## N1QL Index Setup
+In general, every time N1QL is used (either implicitly through using `workloade` or through setting `kv=false`) some
+kind of index must be present to make it work. Depending on the workload and data size, choosing the right index is
+crucial at runtime in order to get the best performance. If in doubt, please ask at the
+[forums](http://forums.couchbase.com) or get in touch with our team at Couchbase.
+
+For `workloade` and the default `readallfields=true` we recommend creating the following index — ideally, if using
+Couchbase Server 4.5 or later, with the "Memory Optimized Index" setting enabled on the bucket.
+
+```
+CREATE INDEX wle_idx ON `bucketname`(meta().id);
+```
+
+For other workloads, different index setups might be even more performant.
+
+## Performance Considerations
+As it is with any benchmark, there are a lot of knobs to tune in order to get great or (if you are reading
+this and trying to write a competitor benchmark ;-)) bad performance.
+
+The first setting you should consider, if you are running on Linux 64bit is setting `-p couchbase.epoll=true`. This will
+then turn on the Epoll IO mechanisms in the underlying Netty library which provides better performance since it has less
+synchronization to do than the NIO default. This only works on Linux, but you are benchmarking on the OS you are
+deploying to, right?
+
+The second option, `boost`, sounds more magic than it actually is. By default this benchmark trades CPU for throughput,
+but this can be disabled by setting `-p couchbase.boost=0`. This defaults to 3, and 3 is the number of event loops run
+in the IO layer. 3 is a reasonable default but you should set it to the number of **physical** cores you have available
+on the machine if you only plan to run one YCSB instance. Make sure (using profiling) to max out your cores, but don't
+overdo it.
+
+## Sync vs Async
+By default, since YCSB is sync the code will always wait for the operation to complete. In some cases it can be useful
+to just "drive load" and disable the waiting. Note that when the "-p couchbase.syncMutationResponse=false" option is
+used, the measured results by YCSB can basically be thrown away. Still helpful sometimes during load phases to speed
+them up :)
+
+## Configuration Options
+Since no setup is the same and the goal of YCSB is to deliver realistic benchmarks, here are some setups that you can
+tune. Note that if you need more flexibility (let's say a custom transcoder), you still need to extend this driver and
+implement the facilities on your own.
+
+You can set the following properties (with the default settings applied):
+
+ - couchbase.host=127.0.0.1: The hostname from one server.
+ - couchbase.bucket=default: The bucket name to use.
+ - couchbase.password=: The password of the bucket.
+ - couchbase.syncMutationResponse=true: If mutations should wait for the response to complete.
+ - couchbase.persistTo=0: Persistence durability requirement
+ - couchbase.replicateTo=0: Replication durability requirement
+ - couchbase.upsert=false: Use upsert instead of insert or replace.
+ - couchbase.adhoc=false: If set to true, prepared statements are not used.
+ - couchbase.kv=true: If set to false, mutation operations will also be performed through N1QL.
+ - couchbase.maxParallelism=1: The server parallelism for all n1ql queries.
+ - couchbase.kvEndpoints=1: The number of KV sockets to open per server.
+ - couchbase.queryEndpoints=5: The number of N1QL Query sockets to open per server.
+ - couchbase.epoll=false: If Epoll instead of NIO should be used (only available for Linux).
+ - couchbase.boost=3: If > 0 trades CPU for higher throughput. N is the number of event loops, ideally
+   set to the number of physical cores. Setting higher than that will likely degrade performance.
\ No newline at end of file
diff --git a/couchbase2/pom.xml b/couchbase2/pom.xml
new file mode 100644
index 0000000000000000000000000000000000000000..f152a856efb6d94bab62b503e2984cfcea737c35
--- /dev/null
+++ b/couchbase2/pom.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2015 YCSB contributors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file except in compliance with the License. You
+may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License. See accompanying
+LICENSE file.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>com.yahoo.ycsb</groupId>
+        <artifactId>binding-parent</artifactId>
+        <version>0.9.0-SNAPSHOT</version>
+        <relativePath>../binding-parent</relativePath>
+    </parent>
+
+    <artifactId>couchbase2-binding</artifactId>
+    <name>Couchbase Java SDK 2.x Binding</name>
+    <packaging>jar</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.couchbase.client</groupId>
+            <artifactId>java-client</artifactId>
+            <version>${couchbase2.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.yahoo.ycsb</groupId>
+            <artifactId>core</artifactId>
+            <version>${project.version}</version>
+            <scope>provided</scope>
+        </dependency>
+    </dependencies>
+
+</project>
diff --git a/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java b/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java
new file mode 100644
index 0000000000000000000000000000000000000000..41695243ee10f392baab65d0ed00cabec02e6cae
--- /dev/null
+++ b/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java
@@ -0,0 +1,896 @@
+/**
+ * Copyright (c) 2016 Yahoo! Inc. All rights reserved.
+ * <p>
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+
+package com.yahoo.ycsb.db.couchbase2;
+
+import com.couchbase.client.core.env.DefaultCoreEnvironment;
+import com.couchbase.client.core.env.resources.IoPoolShutdownHook;
+import com.couchbase.client.core.logging.CouchbaseLogger;
+import com.couchbase.client.core.logging.CouchbaseLoggerFactory;
+import com.couchbase.client.deps.com.fasterxml.jackson.core.JsonFactory;
+import com.couchbase.client.deps.com.fasterxml.jackson.core.JsonGenerator;
+import com.couchbase.client.deps.com.fasterxml.jackson.databind.JsonNode;
+import com.couchbase.client.deps.com.fasterxml.jackson.databind.node.ObjectNode;
+import com.couchbase.client.deps.io.netty.channel.DefaultSelectStrategyFactory;
+import com.couchbase.client.deps.io.netty.channel.EventLoopGroup;
+import com.couchbase.client.deps.io.netty.channel.SelectStrategy;
+import com.couchbase.client.deps.io.netty.channel.SelectStrategyFactory;
+import com.couchbase.client.deps.io.netty.channel.epoll.EpollEventLoopGroup;
+import com.couchbase.client.deps.io.netty.channel.nio.NioEventLoopGroup;
+import com.couchbase.client.deps.io.netty.util.IntSupplier;
+import com.couchbase.client.deps.io.netty.util.concurrent.DefaultThreadFactory;
+import com.couchbase.client.java.Bucket;
+import com.couchbase.client.java.Cluster;
+import com.couchbase.client.java.CouchbaseCluster;
+import com.couchbase.client.java.PersistTo;
+import com.couchbase.client.java.ReplicateTo;
+import com.couchbase.client.java.document.Document;
+import com.couchbase.client.java.document.RawJsonDocument;
+import com.couchbase.client.java.document.json.JsonArray;
+import com.couchbase.client.java.document.json.JsonObject;
+import com.couchbase.client.java.env.CouchbaseEnvironment;
+import com.couchbase.client.java.env.DefaultCouchbaseEnvironment;
+import com.couchbase.client.java.error.TemporaryFailureException;
+import com.couchbase.client.java.query.*;
+import com.couchbase.client.java.transcoder.JacksonTransformers;
+import com.couchbase.client.java.util.Blocking;
+import com.yahoo.ycsb.ByteIterator;
+import com.yahoo.ycsb.DB;
+import com.yahoo.ycsb.DBException;
+import com.yahoo.ycsb.Status;
+import com.yahoo.ycsb.StringByteIterator;
+import rx.Observable;
+import rx.Subscriber;
+import rx.functions.Action1;
+import rx.functions.Func1;
+
+import java.io.StringWriter;
+import java.io.Writer;
+import java.nio.channels.spi.SelectorProvider;
+import java.util.*;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.LockSupport;
+
+/**
+ * A class that wraps the 2.x Couchbase SDK to be used with YCSB.
+ *
+ * <p> The following options can be passed when using this database client to override the defaults.
+ *
+ * <ul>
+ * <li><b>couchbase.host=127.0.0.1</b> The hostname from one server.</li>
+ * <li><b>couchbase.bucket=default</b> The bucket name to use.</li>
+ * <li><b>couchbase.password=</b> The password of the bucket.</li>
+ * <li><b>couchbase.syncMutationResponse=true</b> If mutations should wait for the response to complete.</li>
+ * <li><b>couchbase.persistTo=0</b> Persistence durability requirement</li>
+ * <li><b>couchbase.replicateTo=0</b> Replication durability requirement</li>
+ * <li><b>couchbase.upsert=false</b> Use upsert instead of insert or replace.</li>
+ * <li><b>couchbase.adhoc=false</b> If set to true, prepared statements are not used.</li>
+ * <li><b>couchbase.kv=true</b> If set to false, mutation operations will also be performed through N1QL.</li>
+ * <li><b>couchbase.maxParallelism=1</b> The server parallelism for all n1ql queries.</li>
+ * <li><b>couchbase.kvEndpoints=1</b> The number of KV sockets to open per server.</li>
+ * <li><b>couchbase.queryEndpoints=5</b> The number of N1QL Query sockets to open per server.</li>
+ * <li><b>couchbase.epoll=false</b> If Epoll instead of NIO should be used (only available for Linux).</li>
+ * <li><b>couchbase.boost=3</b> If > 0 trades CPU for higher throughput. N is the number of event loops, ideally
+ *      set to the number of physical cores. Setting higher than that will likely degrade performance.</li>
+ * </ul>
+ */
+public class Couchbase2Client extends DB {
+
+  static {
+    // No need to send the full encoded_plan for this benchmark workload, less network overhead!
+    System.setProperty("com.couchbase.query.encodedPlanEnabled", "false");
+  }
+
+  private static final CouchbaseLogger LOGGER = CouchbaseLoggerFactory.getInstance(Couchbase2Client.class);
+  private static final Object INIT_COORDINATOR = new Object();
+
+  private static volatile CouchbaseEnvironment env = null;
+
+  private Cluster cluster;
+  private Bucket bucket;
+  private String bucketName;
+  private boolean upsert;
+  private PersistTo persistTo;
+  private ReplicateTo replicateTo;
+  private boolean syncMutResponse;
+  private boolean epoll;
+  private long kvTimeout;
+  private boolean adhoc;
+  private boolean kv;
+  private int maxParallelism;
+  private String host;
+  private int kvEndpoints;
+  private int queryEndpoints;
+  private int boost;
+
+  @Override
+  public void init() throws DBException {
+    Properties props = getProperties();
+
+    host = props.getProperty("couchbase.host", "127.0.0.1");
+    bucketName = props.getProperty("couchbase.bucket", "default");
+    String bucketPassword = props.getProperty("couchbase.password", "");
+
+    upsert = props.getProperty("couchbase.upsert", "false").equals("true");
+    persistTo = parsePersistTo(props.getProperty("couchbase.persistTo", "0"));
+    replicateTo = parseReplicateTo(props.getProperty("couchbase.replicateTo", "0"));
+    syncMutResponse = props.getProperty("couchbase.syncMutationResponse", "true").equals("true");
+    adhoc = props.getProperty("couchbase.adhoc", "false").equals("true");
+    kv = props.getProperty("couchbase.kv", "true").equals("true");
+    maxParallelism = Integer.parseInt(props.getProperty("couchbase.maxParallelism", "1"));
+    kvEndpoints = Integer.parseInt(props.getProperty("couchbase.kvEndpoints", "1"));
+    queryEndpoints = Integer.parseInt(props.getProperty("couchbase.queryEndpoints", "5"));
+    epoll = props.getProperty("couchbase.epoll", "false").equals("true");
+    boost = Integer.parseInt(props.getProperty("couchbase.boost", "3"));
+
+    try {
+      synchronized (INIT_COORDINATOR) {
+        if (env == null) {
+          DefaultCouchbaseEnvironment.Builder builder = DefaultCouchbaseEnvironment
+              .builder()
+              .queryEndpoints(queryEndpoints)
+              .callbacksOnIoPool(true)
+              .kvEndpoints(kvEndpoints);
+
+          // Tune boosting and epoll based on settings
+          SelectStrategyFactory factory = boost > 0 ?
+              new BackoffSelectStrategyFactory() : DefaultSelectStrategyFactory.INSTANCE;
+
+          int poolSize = boost > 0 ? boost : Integer.parseInt(
+              System.getProperty("com.couchbase.ioPoolSize", Integer.toString(DefaultCoreEnvironment.IO_POOL_SIZE))
+          );
+          ThreadFactory threadFactory = new DefaultThreadFactory("cb-io", true);
+
+          EventLoopGroup group = epoll ? new EpollEventLoopGroup(poolSize, threadFactory, factory)
+              : new NioEventLoopGroup(poolSize, threadFactory, SelectorProvider.provider(), factory);
+          builder.ioPool(group, new IoPoolShutdownHook(group));
+
+          env = builder.build();
+          logParams();
+        }
+      }
+
+      cluster = CouchbaseCluster.create(env, host);
+      bucket = cluster.openBucket(bucketName, bucketPassword);
+      kvTimeout = env.kvTimeout();
+    } catch (Exception ex) {
+      throw new DBException("Could not connect to Couchbase Bucket.", ex);
+    }
+
+    if (!kv && !syncMutResponse) {
+      throw new DBException("Not waiting for N1QL responses on mutations not yet implemented.");
+    }
+  }
+
+  /**
+   * Helper method to log the CLI params so that on the command line debugging is easier.
+   */
+  private void logParams() {
+    StringBuilder sb = new StringBuilder();
+
+    sb.append("host=").append(host);
+    sb.append(", bucket=").append(bucketName);
+    sb.append(", upsert=").append(upsert);
+    sb.append(", persistTo=").append(persistTo);
+    sb.append(", replicateTo=").append(replicateTo);
+    sb.append(", syncMutResponse=").append(syncMutResponse);
+    sb.append(", adhoc=").append(adhoc);
+    sb.append(", kv=").append(kv);
+    sb.append(", maxParallelism=").append(maxParallelism);
+    sb.append(", kvEndpoints=").append(kvEndpoints);
+    sb.append(", queryEndpoints=").append(queryEndpoints);
+
+    sb.append(", epoll=").append(epoll);
+    sb.append(", boost=").append(boost);
+
+    LOGGER.info("===> Using Params: " + sb.toString());
+  }
+
+  @Override
+  public Status read(final String table, final String key, Set<String> fields,
+      final HashMap<String, ByteIterator> result) {
+    try {
+      String docId = formatId(table, key);
+      if (kv) {
+        return readKv(docId, fields, result);
+      } else {
+        return readN1ql(docId, fields, result);
+      }
+    } catch (Exception ex) {
+      ex.printStackTrace();
+      return Status.ERROR;
+    }
+  }
+
+  /**
+   * Performs the {@link #read(String, String, Set, HashMap)} operation via Key/Value ("get").
+   *
+   * @param docId the document ID
+   * @param fields the fields to be loaded
+   * @param result the result map where the doc needs to be converted into
+   * @return The result of the operation.
+   */
+  private Status readKv(final String docId, final Set<String> fields, final HashMap<String, ByteIterator> result)
+    throws Exception {
+    RawJsonDocument loaded = bucket.get(docId, RawJsonDocument.class);
+    if (loaded == null) {
+      return Status.NOT_FOUND;
+    }
+    decode(loaded.content(), fields, result);
+    return Status.OK;
+  }
+
+  /**
+   * Performs the {@link #read(String, String, Set, HashMap)} operation via N1QL ("SELECT").
+   *
+   * If this option should be used, the "-p couchbase.kv=false" property must be set.
+   *
+   * @param docId the document ID
+   * @param fields the fields to be loaded
+   * @param result the result map where the doc needs to be converted into
+   * @return The result of the operation.
+   */
+  private Status readN1ql(final String docId, Set<String> fields, final HashMap<String, ByteIterator> result)
+    throws Exception {
+    String readQuery = "SELECT " + joinFields(fields) + " FROM `" + bucketName + "` USE KEYS [$1]";
+    N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
+        readQuery,
+        JsonArray.from(docId),
+        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
+    ));
+
+    if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
+      throw new DBException("Error while parsing N1QL Result. Query: " + readQuery
+        + ", Errors: " + queryResult.errors());
+    }
+
+    N1qlQueryRow row;
+    try {
+      row = queryResult.rows().next();
+    } catch (NoSuchElementException ex) {
+      return Status.NOT_FOUND;
+    }
+
+    JsonObject content = row.value();
+    if (fields == null) {
+      content = content.getObject(bucketName); // n1ql result set scoped under *.bucketName
+      fields = content.getNames();
+    }
+
+    for (String field : fields) {
+      Object value = content.get(field);
+      result.put(field, new StringByteIterator(value != null ? value.toString() : ""));
+    }
+
+    return Status.OK;
+  }
+
+  @Override
+  public Status update(final String table, final String key, final HashMap<String, ByteIterator> values) {
+    if (upsert) {
+      return upsert(table, key, values);
+    }
+
+    try {
+      String docId = formatId(table, key);
+      if (kv) {
+        return updateKv(docId, values);
+      } else {
+        return updateN1ql(docId, values);
+      }
+    } catch (Exception ex) {
+      ex.printStackTrace();
+      return Status.ERROR;
+    }
+  }
+
+  /**
+   * Performs the {@link #update(String, String, HashMap)} operation via Key/Value ("replace").
+   *
+   * @param docId the document ID
+   * @param values the values to update the document with.
+   * @return The result of the operation.
+   */
+  private Status updateKv(final String docId, final HashMap<String, ByteIterator> values) {
+    waitForMutationResponse(bucket.async().replace(
+        RawJsonDocument.create(docId, encode(values)),
+        persistTo,
+        replicateTo
+    ));
+    return Status.OK;
+  }
+
+  /**
+   * Performs the {@link #update(String, String, HashMap)} operation via N1QL ("UPDATE").
+   *
+   * If this option should be used, the "-p couchbase.kv=false" property must be set.
+   *
+   * @param docId the document ID
+   * @param values the values to update the document with.
+   * @return The result of the operation.
+   */
+  private Status updateN1ql(final String docId, final HashMap<String, ByteIterator> values)
+    throws Exception {
+    String fields = encodeN1qlFields(values);
+    String updateQuery = "UPDATE `" + bucketName + "` USE KEYS [$1] SET " + fields;
+
+    N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
+        updateQuery,
+        JsonArray.from(docId),
+        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
+    ));
+
+    if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
+      throw new DBException("Error while parsing N1QL Result. Query: " + updateQuery
+        + ", Errors: " + queryResult.errors());
+    }
+    return Status.OK;
+  }
+
+  @Override
+  public Status insert(final String table, final String key, final HashMap<String, ByteIterator> values) {
+    if (upsert) {
+      return upsert(table, key, values);
+    }
+
+    try {
+      String docId = formatId(table, key);
+      if (kv) {
+        return insertKv(docId, values);
+      } else {
+        return insertN1ql(docId, values);
+      }
+    } catch (Exception ex) {
+      ex.printStackTrace();
+      return Status.ERROR;
+    }
+  }
+
+  /**
+   * Performs the {@link #insert(String, String, HashMap)} operation via Key/Value ("INSERT").
+   *
+   * Note that during the "load" phase it makes sense to retry TMPFAILS (so that even if the server is
+   * overloaded temporarily the ops will succeed eventually). The current code will retry TMPFAILs
+   * for maximum of one minute and then bubble up the error.
+   *
+   * @param docId the document ID
+   * @param values the values to update the document with.
+   * @return The result of the operation.
+   */
+  private Status insertKv(final String docId, final HashMap<String, ByteIterator> values) {
+    int tries = 60; // roughly 60 seconds with the 1 second sleep, not 100% accurate.
+
+    for(int i = 0; i < tries; i++) {
+      try {
+        waitForMutationResponse(bucket.async().insert(
+            RawJsonDocument.create(docId, encode(values)),
+            persistTo,
+            replicateTo
+        ));
+        return Status.OK;
+      } catch (TemporaryFailureException ex) {
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+          throw new RuntimeException("Interrupted while sleeping on TMPFAIL backoff.", ex);
+        }
+      }
+    }
+
+    throw new RuntimeException("Still receiving TMPFAIL from the server after trying " + tries + " times. " +
+      "Check your server.");
+  }
+
+  /**
+   * Performs the {@link #insert(String, String, HashMap)} operation via N1QL ("INSERT").
+   *
+   * If this option should be used, the "-p couchbase.kv=false" property must be set.
+   *
+   * @param docId the document ID
+   * @param values the values to update the document with.
+   * @return The result of the operation.
+   */
+  private Status insertN1ql(final String docId, final HashMap<String, ByteIterator> values)
+    throws Exception {
+    String insertQuery = "INSERT INTO `" + bucketName + "`(KEY,VALUE) VALUES ($1,$2)";
+
+    N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
+        insertQuery,
+        JsonArray.from(docId, valuesToJsonObject(values)),
+        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
+    ));
+
+    if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
+      throw new DBException("Error while parsing N1QL Result. Query: " + insertQuery
+        + ", Errors: " + queryResult.errors());
+    }
+    return Status.OK;
+  }
+
+  /**
+   * Performs an upsert instead of insert or update using either Key/Value or N1QL.
+   *
+   * If this option should be used, the "-p couchbase.upsert=true" property must be set.
+   *
+   * @param table The name of the table
+   * @param key The record key of the record to insert.
+   * @param values A HashMap of field/value pairs to insert in the record
+   * @return The result of the operation.
+   */
+  private Status upsert(final String table, final String key, final HashMap<String, ByteIterator> values) {
+    try {
+      String docId = formatId(table, key);
+      if (kv) {
+        return upsertKv(docId, values);
+      } else {
+        return upsertN1ql(docId, values);
+      }
+    } catch (Exception ex) {
+      ex.printStackTrace();
+      return Status.ERROR;
+    }
+  }
+
+  /**
+   * Performs the {@link #upsert(String, String, HashMap)} operation via Key/Value ("upsert").
+   *
+   * If this option should be used, the "-p couchbase.upsert=true" property must be set.
+   *
+   * @param docId the document ID
+   * @param values the values to update the document with.
+   * @return The result of the operation.
+   */
+  private Status upsertKv(final String docId, final HashMap<String, ByteIterator> values) {
+    waitForMutationResponse(bucket.async().upsert(
+        RawJsonDocument.create(docId, encode(values)),
+        persistTo,
+        replicateTo
+    ));
+    return Status.OK;
+  }
+
+  /**
+   * Performs the {@link #upsert(String, String, HashMap)} operation via N1QL ("UPSERT").
+   *
+   * If this option should be used, the "-p couchbase.upsert=true -p couchbase.kv=false" properties must be set.
+   *
+   * @param docId the document ID
+   * @param values the values to update the document with.
+   * @return The result of the operation.
+   */
+  private Status upsertN1ql(final String docId, final HashMap<String, ByteIterator> values)
+    throws Exception {
+    String upsertQuery = "UPSERT INTO `" + bucketName + "`(KEY,VALUE) VALUES ($1,$2)";
+
+    N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
+        upsertQuery,
+        JsonArray.from(docId, valuesToJsonObject(values)),
+        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
+    ));
+
+    if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
+      throw new DBException("Error while parsing N1QL Result. Query: " + upsertQuery
+        + ", Errors: " + queryResult.errors());
+    }
+    return Status.OK;
+  }
+
+  @Override
+  public Status delete(final String table, final String key) {
+    try {
+      String docId = formatId(table, key);
+      if (kv) {
+        return deleteKv(docId);
+      } else {
+        return deleteN1ql(docId);
+      }
+    } catch (Exception ex) {
+      ex.printStackTrace();
+      return Status.ERROR;
+    }
+  }
+
+  /**
+   * Performs the {@link #delete(String, String)} operation via Key/Value ("remove").
+   *
+   * @param docId the document ID.
+   * @return The result of the operation.
+   */
+  private Status deleteKv(final String docId) {
+    waitForMutationResponse(bucket.async().remove(
+        docId,
+        persistTo,
+        replicateTo
+    ));
+    return Status.OK;
+  }
+
+  /**
+   * Performs the {@link #delete(String, String)} operation via N1QL ("DELETE").
+   *
+   * If this option should be used, the "-p couchbase.kv=false" property must be set.
+   *
+   * @param docId the document ID.
+   * @return The result of the operation.
+   */
+  private Status deleteN1ql(final String docId) throws Exception {
+    String deleteQuery = "DELETE FROM `" + bucketName + "` USE KEYS [$1]";
+    N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
+        deleteQuery,
+        JsonArray.from(docId),
+        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
+    ));
+
+    if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
+      throw new DBException("Error while parsing N1QL Result. Query: " + deleteQuery
+        + ", Errors: " + queryResult.errors());
+    }
+    return Status.OK;
+  }
+
+  @Override
+  public Status scan(final String table, final String startkey, final int recordcount, final Set<String> fields,
+      final Vector<HashMap<String, ByteIterator>> result) {
+    try {
+      if (fields == null || fields.isEmpty()) {
+        return scanAllFields(table, startkey, recordcount, result);
+      } else {
+        return scanSpecificFields(table, startkey, recordcount, fields, result);
+      }
+    } catch (Exception ex) {
+      ex.printStackTrace();
+      return Status.ERROR;
+    }
+  }
+
+  /**
+   * Performs the {@link #scan(String, String, int, Set, Vector)} operation, optimized for all fields.
+   *
+   * Since the full document bodies need to be loaded anyways, it makes sense to just grab the document IDs
+   * from N1QL and then perform the bulk loading via KV for better performance. This is a usual pattern with
+   * Couchbase and shows the benefits of using both N1QL and KV together.
+   *
+   * @param table The name of the table
+   * @param startkey The record key of the first record to read.
+   * @param recordcount The number of records to read
+   * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
+   * @return The result of the operation.
+   */
+  private Status scanAllFields(final String table, final String startkey, final int recordcount,
+      final Vector<HashMap<String, ByteIterator>> result) {
+    // $1/$2 are N1QL positional parameters; they must NOT be quoted, or they become string literals.
+    final String scanQuery = "SELECT meta().id as id FROM `" + bucketName + "` WHERE meta().id >= $1 LIMIT $2";
+    Collection<HashMap<String, ByteIterator>> documents = bucket.async()
+        .query(N1qlQuery.parameterized(
+        scanQuery,
+        JsonArray.from(formatId(table, startkey), recordcount),
+        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
+      ))
+        .doOnNext(new Action1<AsyncN1qlQueryResult>() {
+          @Override
+          public void call(AsyncN1qlQueryResult result) {
+            if (!result.parseSuccess()) {
+              throw new RuntimeException("Error while parsing N1QL Result. Query: " + scanQuery
+                + ", Errors: " + result.errors());
+            }
+          }
+        })
+        .flatMap(new Func1<AsyncN1qlQueryResult, Observable<AsyncN1qlQueryRow>>() {
+          @Override
+          public Observable<AsyncN1qlQueryRow> call(AsyncN1qlQueryResult result) {
+            return result.rows();
+          }
+        })
+        .flatMap(new Func1<AsyncN1qlQueryRow, Observable<RawJsonDocument>>() {
+          @Override
+          public Observable<RawJsonDocument> call(AsyncN1qlQueryRow row) {
+            return bucket.async().get(row.value().getString("id"), RawJsonDocument.class);
+          }
+        })
+        .map(new Func1<RawJsonDocument, HashMap<String, ByteIterator>>() {
+          @Override
+          public HashMap<String, ByteIterator> call(RawJsonDocument document) {
+            HashMap<String, ByteIterator> tuple = new HashMap<String, ByteIterator>();
+            decode(document.content(), null, tuple);
+            return tuple;
+          }
+        })
+        .toList()
+        .toBlocking()
+        .single();
+
+    result.addAll(documents);
+    return Status.OK;
+  }
+
+  /**
+   * Performs the {@link #scan(String, String, int, Set, Vector)} operation N1Ql only for a subset of the fields.
+   *
+   * @param table The name of the table
+   * @param startkey The record key of the first record to read.
+   * @param recordcount The number of records to read
+   * @param fields The list of fields to read, or null for all of them
+   * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
+   * @return The result of the operation.
+   */
+  private Status scanSpecificFields(final String table, final String startkey, final int recordcount,
+      final Set<String> fields, final Vector<HashMap<String, ByteIterator>> result) {
+    // $1/$2 are N1QL positional parameters; quoting them would turn them into string literals.
+    String scanQuery = "SELECT " + joinFields(fields) + " FROM `" + bucketName + "` WHERE meta().id >= $1 LIMIT $2";
+    N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
+        scanQuery,
+        JsonArray.from(formatId(table, startkey), recordcount),
+        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
+    ));
+
+    if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
+      throw new RuntimeException("Error while parsing N1QL Result. Query: " + scanQuery
+        + ", Errors: " + queryResult.errors());
+    }
+
+    boolean allFields = fields == null || fields.isEmpty();
+    result.ensureCapacity(recordcount);
+
+    for (N1qlQueryRow row : queryResult) {
+      JsonObject value = row.value();
+      if (fields == null) {
+        value = value.getObject(bucketName);
+      }
+      Set<String> f = allFields ? value.getNames() : fields;
+      HashMap<String, ByteIterator> tuple = new HashMap<String, ByteIterator>(f.size());
+      for (String field : f) {
+        tuple.put(field, new StringByteIterator(value.getString(field)));
+      }
+      result.add(tuple);
+    }
+    return Status.OK;
+  }
+
+  /**
+   * Helper method to block on the response, depending on the property set.
+   *
+   * By default, since YCSB is sync the code will always wait for the operation to complete. In some
+   * cases it can be useful to just "drive load" and disable the waiting. Note that when the
+   * "-p couchbase.syncMutationResponse=false" option is used, the measured results by YCSB can basically
+   * be thrown away. Still helpful sometimes during load phases to speed them up :)
+   *
+   * @param input the async input observable.
+   */
+  private void waitForMutationResponse(final Observable<? extends Document<?>> input) {
+    if (!syncMutResponse) {
+      input.subscribe(new Subscriber<Document<?>>() {
+        @Override
+        public void onCompleted() {
+        }
+
+        @Override
+        public void onError(Throwable e) {
+        }
+
+        @Override
+        public void onNext(Document<?> document) {
+        }
+      });
+    } else {
+      Blocking.blockForSingle(input, kvTimeout, TimeUnit.MILLISECONDS);
+    }
+  }
+
+  /**
+   * Helper method to turn the values into a String, used with {@link #updateN1ql(String, HashMap)}.
+   *
+   * @param values the values to encode.
+   * @return the encoded string.
+   */
+  private static String encodeN1qlFields(final HashMap<String, ByteIterator> values) {
+    if (values.isEmpty()) {
+      return "";
+    }
+
+    StringBuilder sb = new StringBuilder();
+    for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
+      String raw = entry.getValue().toString();
+      String escaped = raw.replace("\"", "\\\"").replace("\'", "\\\'");
+      sb.append(entry.getKey()).append("=\"").append(escaped).append("\" ");
+    }
+    String toReturn = sb.toString();
+    return toReturn.substring(0, toReturn.length() - 1);
+  }
+
+  /**
+   * Helper method to turn the map of values into a {@link JsonObject} for further use.
+   *
+   * @param values the values to transform.
+   * @return the created json object.
+   */
+  private static JsonObject valuesToJsonObject(final HashMap<String, ByteIterator> values) {
+    JsonObject result = JsonObject.create();
+    for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
+      result.put(entry.getKey(), entry.getValue().toString());
+    }
+    return result;
+  }
+
+  /**
+   * Helper method to join the set of fields into a String suitable for N1QL.
+   *
+   * @param fields the fields to join.
+   * @return the joined fields as a String.
+   */
+  private static String joinFields(final Set<String> fields) {
+    if (fields == null || fields.isEmpty()) {
+      return "*";
+    }
+    StringBuilder builder = new StringBuilder();
+    for (String f : fields) {
+      builder.append("`").append(f).append("`").append(",");
+    }
+    String toReturn = builder.toString();
+    return toReturn.substring(0, toReturn.length() - 1);
+  }
+
+  /**
+   * Helper method to turn the prefix and key into a proper document ID.
+   *
+   * @param prefix the prefix (table).
+   * @param key the key itself.
+   * @return a document ID that can be used with Couchbase.
+   */
+  private static String formatId(final String prefix, final String key) {
+    return prefix + ":" + key;
+  }
+
+  /**
+   * Helper method to parse the "ReplicateTo" property on startup.
+   *
+   * @param property the property to parse.
+   * @return the parsed setting.
+   */
+  private static ReplicateTo parseReplicateTo(final String property) throws DBException {
+    int value = Integer.parseInt(property);
+
+    switch (value) {
+    case 0:
+      return ReplicateTo.NONE;
+    case 1:
+      return ReplicateTo.ONE;
+    case 2:
+      return ReplicateTo.TWO;
+    case 3:
+      return ReplicateTo.THREE;
+    default:
+      throw new DBException("\"couchbase.replicateTo\" must be between 0 and 3");
+    }
+  }
+
+  /**
+   * Helper method to parse the "PersistTo" property on startup.
+   *
+   * @param property the property to parse.
+   * @return the parsed setting.
+   */
+  private static PersistTo parsePersistTo(final String property) throws DBException {
+    int value = Integer.parseInt(property);
+
+    switch (value) {
+    case 0:
+      return PersistTo.NONE;
+    case 1:
+      return PersistTo.ONE;
+    case 2:
+      return PersistTo.TWO;
+    case 3:
+      return PersistTo.THREE;
+    case 4:
+      return PersistTo.FOUR;
+    default:
+      throw new DBException("\"couchbase.persistTo\" must be between 0 and 4");
+    }
+  }
+
+  /**
+   * Decode the String from server and pass it into the decoded destination.
+   *
+   * @param source the loaded object.
+   * @param fields the fields to check.
+   * @param dest the result passed back to YCSB.
+   */
+  private void decode(final String source, final Set<String> fields,
+                      final HashMap<String, ByteIterator> dest) {
+    try {
+      JsonNode json = JacksonTransformers.MAPPER.readTree(source);
+      boolean checkFields = fields != null && !fields.isEmpty();
+      for (Iterator<Map.Entry<String, JsonNode>> jsonFields = json.fields(); jsonFields.hasNext();) {
+        Map.Entry<String, JsonNode> jsonField = jsonFields.next();
+        String name = jsonField.getKey();
+        // Skip fields the caller did NOT request (previous code had this inverted).
+        if (checkFields && !fields.contains(name)) {
+          continue;
+        }
+        JsonNode jsonValue = jsonField.getValue();
+        if (jsonValue != null && !jsonValue.isNull()) {
+          dest.put(name, new StringByteIterator(jsonValue.asText()));
+        }
+      }
+    } catch (Exception e) {
+      throw new RuntimeException("Could not decode JSON", e);
+    }
+  }
+
+  /**
+   * Encode the source into a String for storage.
+   *
+   * @param source the source value.
+   * @return the encoded string.
+   */
+  private String encode(final HashMap<String, ByteIterator> source) {
+    HashMap<String, String> stringMap = StringByteIterator.getStringMap(source);
+    ObjectNode node = JacksonTransformers.MAPPER.createObjectNode();
+    for (Map.Entry<String, String> pair : stringMap.entrySet()) {
+      node.put(pair.getKey(), pair.getValue());
+    }
+    JsonFactory jsonFactory = new JsonFactory();
+    Writer writer = new StringWriter();
+    try {
+      JsonGenerator jsonGenerator = jsonFactory.createGenerator(writer);
+      JacksonTransformers.MAPPER.writeTree(jsonGenerator, node);
+    } catch (Exception e) {
+      throw new RuntimeException("Could not encode JSON value", e);
+    }
+    return writer.toString();
+  }
+}
+
+/**
+ * Factory for the {@link BackoffSelectStrategy} to be used with boosting.
+ */
+class BackoffSelectStrategyFactory implements SelectStrategyFactory {
+  @Override
+  public SelectStrategy newSelectStrategy() {
+    return new BackoffSelectStrategy();
+  }
+}
+
+/**
+ * Custom IO select strategy which trades CPU for throughput, used with the boost setting.
+ */
+class BackoffSelectStrategy implements SelectStrategy {
+
+  private int counter = 0;
+
+  @Override
+  public int calculateStrategy(final IntSupplier supplier, final boolean hasTasks) throws Exception {
+    int selectNowResult = supplier.get();
+    if (hasTasks || selectNowResult != 0) {
+      counter = 0;
+      return selectNowResult;
+    }
+    counter++;
+
+    // Check thresholds in descending order; ascending order made the later branches unreachable.
+    if (counter > 5000) {
+      // defer to blocking select
+      counter = 0;
+      return SelectStrategy.SELECT;
+    } else if (counter > 4000) {
+      LockSupport.parkNanos(1000);
+    } else if (counter > 3000) {
+      Thread.yield();
+    } else if (counter > 2000) {
+      LockSupport.parkNanos(1);
+    }
+
+    return SelectStrategy.CONTINUE;
+  }
+}
diff --git a/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/package-info.java b/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/package-info.java
new file mode 100644
index 0000000000000000000000000000000000000000..0eb3b3992b30adf338223870471a1e75ed82ab3b
--- /dev/null
+++ b/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2015 - 2016 YCSB contributors. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+
+/**
+ * The YCSB binding for <a href="http://www.couchbase.com/">Couchbase</a>, new driver.
+ */
+package com.yahoo.ycsb.db.couchbase2;
+
diff --git a/distribution/pom.xml b/distribution/pom.xml
index 1e713fee184d24671d7243a9008a2bd0749d3a1e..a74d8f1b16aae89ac8eca282df8f30074a0ed643 100644
--- a/distribution/pom.xml
+++ b/distribution/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>root</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
   </parent>
 
   <artifactId>ycsb</artifactId>
@@ -59,6 +59,11 @@ LICENSE file.
       <artifactId>couchbase-binding</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>com.yahoo.ycsb</groupId>
+      <artifactId>couchbase2-binding</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>com.yahoo.ycsb</groupId>
       <artifactId>dynamodb-binding</artifactId>
@@ -79,6 +84,11 @@ LICENSE file.
       <artifactId>googledatastore-binding</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>com.yahoo.ycsb</groupId>
+      <artifactId>googlebigtable-binding</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>com.yahoo.ycsb</groupId>
       <artifactId>hbase094-binding</artifactId>
diff --git a/dynamodb/pom.xml b/dynamodb/pom.xml
index d011048b91eb651ae305dd74b02673656736c386..593371a649b90929a99c90851e43ef6fb8233fa5 100644
--- a/dynamodb/pom.xml
+++ b/dynamodb/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
diff --git a/elasticsearch/README.md b/elasticsearch/README.md
index 344ea9c91ac73a509909516688dcec0a2eaec8b9..157ccec0c04fe4688da30af531b5fbdc3db4703d 100644
--- a/elasticsearch/README.md
+++ b/elasticsearch/README.md
@@ -43,15 +43,12 @@ For further configuration see below:
 The default setting for the Elasticsearch node that is created is as follows:
 
     cluster.name=es.ycsb.cluster
-    node.local=true
-    path.data=$TEMP_DIR/esdata
-    discovery.zen.ping.multicast.enabled=false
-    index.mapping._id.indexed=true
-    index.gateway.type=none
-    gateway.type=none
-    index.number_of_shards=1
-    index.number_of_replicas=0
     es.index.key=es.ycsb
+    es.number_of_shards=1
+    es.number_of_replicas=0
+    es.remote=false
+    es.newdb=false
+    es.hosts.list=localhost:9300 (only applies if es.remote=true)
 
 ### Custom Configuration
 If you wish to customize the settings used to create the Elasticsearch node
@@ -66,25 +63,17 @@ pass it into the Elasticsearch client:
 
     ./bin/ycsb run elasticsearch -P workloads/workloada -P myproperties.data -s
 
-
-If you wish to use a in-memory store type rather than the default disk store add 
-the following properties to your custom properties file. For a large number of 
-insert operations insure that you have sufficient memory on your test system 
-otherwise you will run out of memory.
-
-    index.store.type=memory
-    index.store.fs.memory.enabled=true
-    cache.memory.small_buffer_size=4mb
-    cache.memory.large_cache_size=1024mb
-
 If you wish to change the default index name you can set the following property:
 
     es.index.key=my_index_key
 
-### Troubleshoot
-If you encounter error messages such as :
-"Primary shard is not active or isn't assigned is a known node."
+If you wish to run against a remote cluster you can set the following property:
+
+    es.remote=true
+
+By default this will use localhost:9300 as a seed node to discover the cluster.
+You can also specify
 
-Try removing /tmp/esdata/ folder. 
-    rm -rf /tmp/esdata
+    es.hosts.list=(\w+:\d+)+
 
+(a comma-separated list of host/port pairs) to change this.
diff --git a/elasticsearch/pom.xml b/elasticsearch/pom.xml
index c4aad6324f124d3e807bf1eb2f1ec65f13ea7675..8293fa82b7532348ae92fc37dca06f05112efe38 100644
--- a/elasticsearch/pom.xml
+++ b/elasticsearch/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
     <parent>
         <groupId>com.yahoo.ycsb</groupId>
         <artifactId>binding-parent</artifactId>
-        <version>0.8.0-SNAPSHOT</version>
+        <version>0.9.0-SNAPSHOT</version>
         <relativePath>../binding-parent</relativePath>
     </parent>
 
@@ -29,7 +29,7 @@ LICENSE file.
     <name>Elasticsearch Binding</name>
     <packaging>jar</packaging>
     <properties>
-        <elasticsearch-version>2.2.0</elasticsearch-version>
+        <elasticsearch-version>2.3.1</elasticsearch-version>
     </properties>
     <dependencies>
         <dependency>
diff --git a/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java b/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java
index 1d79e3c2897eb79e25363ba15c81f7bd00d818d6..3c0762cd097a3b1cd913515773be9cfc4131c943 100644
--- a/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java
+++ b/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java
@@ -22,7 +22,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
 import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
 
-
 import com.yahoo.ycsb.ByteIterator;
 import com.yahoo.ycsb.DB;
 import com.yahoo.ycsb.DBException;
@@ -30,6 +29,7 @@ import com.yahoo.ycsb.Status;
 import com.yahoo.ycsb.StringByteIterator;
 
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.client.Client;
@@ -57,19 +57,19 @@ import java.util.Vector;
  * Default properties to set:
  * </p>
  * <ul>
- * <li>es.cluster.name = es.ycsb.cluster
- * <li>es.client = true
+ * <li>cluster.name = es.ycsb.cluster
  * <li>es.index.key = es.ycsb
+ * <li>es.number_of_shards = 1
+ * <li>es.number_of_replicas = 0
  * </ul>
- *
- * @author Sharmarke Aden
- *
  */
 public class ElasticsearchClient extends DB {
 
-  public static final String DEFAULT_CLUSTER_NAME = "es.ycsb.cluster";
-  public static final String DEFAULT_INDEX_KEY = "es.ycsb";
-  public static final String DEFAULT_REMOTE_HOST = "localhost:9300";
+  private static final String DEFAULT_CLUSTER_NAME = "es.ycsb.cluster";
+  private static final String DEFAULT_INDEX_KEY = "es.ycsb";
+  private static final String DEFAULT_REMOTE_HOST = "localhost:9300";
+  private static final int NUMBER_OF_SHARDS = 1;
+  private static final int NUMBER_OF_REPLICAS = 0;
   private Node node;
   private Client client;
   private String indexKey;
@@ -83,32 +83,26 @@ public class ElasticsearchClient extends DB {
   public void init() throws DBException {
     Properties props = getProperties();
     this.indexKey = props.getProperty("es.index.key", DEFAULT_INDEX_KEY);
-    String clusterName =
-        props.getProperty("cluster.name", DEFAULT_CLUSTER_NAME);
+
+    int numberOfShards = parseIntegerProperty(props, "es.number_of_shards", NUMBER_OF_SHARDS);
+    int numberOfReplicas = parseIntegerProperty(props, "es.number_of_replicas", NUMBER_OF_REPLICAS);
+
     // Check if transport client needs to be used (To connect to multiple
     // elasticsearch nodes)
-    remoteMode = Boolean
-        .parseBoolean(props.getProperty("elasticsearch.remote", "false"));
-    Boolean newdb =
-        Boolean.parseBoolean(props.getProperty("elasticsearch.newdb", "false"));
+    remoteMode = Boolean.parseBoolean(props.getProperty("es.remote", "false"));
+    Boolean newdb = Boolean.parseBoolean(props.getProperty("es.newdb", "false"));
     Builder settings = Settings.settingsBuilder()
-        .put("node.local", "true")
-        .put("path.data", System.getProperty("java.io.tmpdir") + "/esdata")
-        .put("discovery.zen.ping.multicast.enabled", "false")
-        .put("index.mapping._id.indexed", "true")
-        .put("index.gateway.type", "none")
-        .put("index.number_of_shards", "1")
-        .put("index.number_of_replicas", "0")
+        .put("cluster.name", DEFAULT_CLUSTER_NAME)
+        .put("node.local", Boolean.toString(!remoteMode))
         .put("path.home", System.getProperty("java.io.tmpdir"));
 
     // if properties file contains elasticsearch user defined properties
     // add it to the settings file (will overwrite the defaults).
     settings.put(props);
-    System.out.println(
-        "Elasticsearch starting node = " + settings.get("cluster.name"));
-    System.out
-        .println("Elasticsearch node data path = " + settings.get("path.data"));
-    System.out.println("Elasticsearch Remote Mode = " + remoteMode);
+    final String clusterName = settings.get("cluster.name");
+    System.err.println("Elasticsearch starting node = " + clusterName);
+    System.err.println("Elasticsearch node path.home = " + settings.get("path.home"));
+    System.err.println("Elasticsearch Remote Mode = " + remoteMode);
     // Remote mode support for connecting to remote elasticsearch cluster
     if (remoteMode) {
       settings.put("client.transport.sniff", true)
@@ -116,13 +110,9 @@ public class ElasticsearchClient extends DB {
           .put("client.transport.ping_timeout", "30s")
           .put("client.transport.nodes_sampler_interval", "30s");
       // Default it to localhost:9300
-      String[] nodeList =
-          props.getProperty("elasticsearch.hosts.list", DEFAULT_REMOTE_HOST)
-              .split(",");
-      System.out.println("Elasticsearch Remote Hosts = "
-          + props.getProperty("elasticsearch.hosts.list", DEFAULT_REMOTE_HOST));
-      TransportClient tClient = TransportClient.builder()
-                                  .settings(settings).build();
+      String[] nodeList = props.getProperty("es.hosts.list", DEFAULT_REMOTE_HOST).split(",");
+      System.out.println("Elasticsearch Remote Hosts = " + props.getProperty("es.hosts.list", DEFAULT_REMOTE_HOST));
+      TransportClient tClient = TransportClient.builder().settings(settings).build();
       for (String h : nodeList) {
         String[] nodes = h.split(":");
         try {
@@ -143,21 +133,29 @@ public class ElasticsearchClient extends DB {
       client = node.client();
     }
 
-    //wait for shards to be ready
-    client.admin().cluster()
-      .health(new ClusterHealthRequest("lists").waitForActiveShards(1))
-      .actionGet();
-    if (newdb) {
+    final boolean exists =
+            client.admin().indices()
+                    .exists(Requests.indicesExistsRequest(indexKey)).actionGet()
+                    .isExists();
+    if (exists && newdb) {
       client.admin().indices().prepareDelete(indexKey).execute().actionGet();
-      client.admin().indices().prepareCreate(indexKey).execute().actionGet();
-    } else {
-      boolean exists = client.admin().indices()
-          .exists(Requests.indicesExistsRequest(indexKey)).actionGet()
-          .isExists();
-      if (!exists) {
-        client.admin().indices().prepareCreate(indexKey).execute().actionGet();
-      }
     }
+    if (!exists || newdb) {
+      client.admin().indices().create(
+              new CreateIndexRequest(indexKey)
+                      .settings(
+                              Settings.builder()
+                                      .put("index.number_of_shards", numberOfShards)
+                                      .put("index.number_of_replicas", numberOfReplicas)
+                                      .put("index.mapping._id.indexed", true)
+                      )).actionGet();
+    }
+    client.admin().cluster().health(new ClusterHealthRequest().waitForGreenStatus()).actionGet();
+  }
+
+  private int parseIntegerProperty(Properties properties, String key, int defaultValue) {
+    String value = properties.getProperty(key);
+    return value == null ? defaultValue : Integer.parseInt(value);
   }
 
   @Override
@@ -192,15 +190,13 @@ public class ElasticsearchClient extends DB {
     try {
       final XContentBuilder doc = jsonBuilder().startObject();
 
-      for (Entry<String, String> entry : StringByteIterator.getStringMap(values)
-          .entrySet()) {
+      for (Entry<String, String> entry : StringByteIterator.getStringMap(values).entrySet()) {
         doc.field(entry.getKey(), entry.getValue());
       }
 
       doc.endObject();
 
-      client.prepareIndex(indexKey, table, key).setSource(doc).execute()
-          .actionGet();
+      client.prepareIndex(indexKey, table, key).setSource(doc).execute().actionGet();
 
       return Status.OK;
     } catch (Exception e) {
@@ -248,8 +244,7 @@ public class ElasticsearchClient extends DB {
   public Status read(String table, String key, Set<String> fields,
       HashMap<String, ByteIterator> result) {
     try {
-      final GetResponse response =
-          client.prepareGet(indexKey, table, key).execute().actionGet();
+      final GetResponse response = client.prepareGet(indexKey, table, key).execute().actionGet();
 
       if (response.isExists()) {
         if (fields != null) {
@@ -289,17 +284,14 @@ public class ElasticsearchClient extends DB {
   public Status update(String table, String key,
       HashMap<String, ByteIterator> values) {
     try {
-      final GetResponse response =
-          client.prepareGet(indexKey, table, key).execute().actionGet();
+      final GetResponse response = client.prepareGet(indexKey, table, key).execute().actionGet();
 
       if (response.isExists()) {
-        for (Entry<String, String> entry : StringByteIterator
-            .getStringMap(values).entrySet()) {
+        for (Entry<String, String> entry : StringByteIterator.getStringMap(values).entrySet()) {
           response.getSource().put(entry.getKey(), entry.getValue());
         }
 
-        client.prepareIndex(indexKey, table, key)
-            .setSource(response.getSource()).execute().actionGet();
+        client.prepareIndex(indexKey, table, key).setSource(response.getSource()).execute().actionGet();
 
         return Status.OK;
       }
@@ -343,11 +335,10 @@ public class ElasticsearchClient extends DB {
       HashMap<String, ByteIterator> entry;
 
       for (SearchHit hit : response.getHits()) {
-        entry = new HashMap<String, ByteIterator>(fields.size());
+        entry = new HashMap<>(fields.size());
 
         for (String field : fields) {
-          entry.put(field,
-              new StringByteIterator((String) hit.getSource().get(field)));
+          entry.put(field, new StringByteIterator((String) hit.getSource().get(field)));
         }
 
         result.add(entry);
diff --git a/elasticsearch/src/test/java/com/yahoo/ycsb/db/ElasticsearchClientTest.java b/elasticsearch/src/test/java/com/yahoo/ycsb/db/ElasticsearchClientTest.java
index 1a80cca37a0137a7c44d39927975e9d7053f6135..69e52ff678f2c29a39fd1e433293f893db2a3e58 100644
--- a/elasticsearch/src/test/java/com/yahoo/ycsb/db/ElasticsearchClientTest.java
+++ b/elasticsearch/src/test/java/com/yahoo/ycsb/db/ElasticsearchClientTest.java
@@ -38,10 +38,6 @@ import java.util.HashMap;
 import java.util.Set;
 import java.util.Vector;
 
-/**
- *
- * @author saden
- */
 public class ElasticsearchClientTest {
 
     protected final static ElasticsearchClient instance = new ElasticsearchClient();
diff --git a/geode/pom.xml b/geode/pom.xml
index 6d614cdf17e4e2c5a6b1f780799e6f71f215da29..40111413e57ee824c8b717296c6228c5719ebb27 100644
--- a/geode/pom.xml
+++ b/geode/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/googlebigtable/README.md b/googlebigtable/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3938b525eaa3fcc02ca06e8033cd402214ede08a
--- /dev/null
+++ b/googlebigtable/README.md
@@ -0,0 +1,80 @@
+<!--
+Copyright (c) 2016 YCSB contributors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file except in compliance with the License. You
+may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License. See accompanying
+LICENSE file.
+-->
+
+# Google Bigtable Driver for YCSB
+
+This driver provides a YCSB workload binding for Google's hosted Bigtable, the inspiration for a number of key-value stores like HBase and Cassandra. The Bigtable Java client provides both Protobuf based GRPC and HBase client APIs. This binding implements the Protobuf API for testing the native client. To test Bigtable using the HBase API, see the `hbase10` binding.
+
+## Quickstart
+
+### 1. Setup a Bigtable Cluster
+
+Login to the Google Cloud Console and follow the [Creating Cluster](https://cloud.google.com/bigtable/docs/creating-cluster) steps. Make a note of your cluster name, zone and project ID.
+
+### 2. Launch the Bigtable Shell
+
+From the Cloud Console, launch a shell and follow the [Quickstart](https://cloud.google.com/bigtable/docs/quickstart) up to step 4 where you launch the HBase shell.
+
+### 3. Create a Table
+
+For best results, use the pre-splitting strategy recommended in [HBASE-4163](https://issues.apache.org/jira/browse/HBASE-4163):
+
+```
+hbase(main):001:0> n_splits = 200 # HBase recommends (10 * number of regionservers)
+hbase(main):002:0> create 'usertable', 'cf', {SPLITS => (1..n_splits).map {|i| "user#{1000+i*(9999-1000)/n_splits}"}}
+```
+
+Make a note of the column family, in this example it's `cf`.
+
+### 4. Fetch the Proper ALPN Boot Jar
+
+The Bigtable protocol uses HTTP/2 which requires an ALPN protocol negotiation implementation. On JVM instantiation the implementation must be loaded before attempting to connect to the cluster. If you're using Java 7 or 8, use this [Jetty Version Table](http://www.eclipse.org/jetty/documentation/current/alpn-chapter.html#alpn-versions) to determine the version appropriate for your JVM. (ALPN is included in JDK 9+). Download the proper jar from [Maven](http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22org.mortbay.jetty.alpn%22%20AND%20a%3A%22alpn-boot%22) somewhere on your system.
+
+### 5. Download JSON Credentials
+
+Follow these instructions for [Generating a JSON key](https://cloud.google.com/bigtable/docs/installing-hbase-shell#service-account) and save it to your host.
+
+### 6. Load a Workload
+
+Switch to the root of the YCSB repo and choose the workload you want to run and `load` it first. With the CLI you must provide the column family, cluster properties and the ALPN jar to load.
+
+```
+bin/ycsb load googlebigtable -p columnfamily=cf -p google.bigtable.project.id=<PROJECT_ID> -p google.bigtable.cluster.name=<CLUSTER> -p google.bigtable.zone.name=<ZONE> -p google.bigtable.auth.service.account.enable=true -p google.bigtable.auth.json.keyfile=<PATH_TO_JSON_KEY> -jvm-args='-Xbootclasspath/p:<PATH_TO_ALPN_JAR>' -P workloads/workloada
+
+```
+
+Make sure to replace the variables in the angle brackets above with the proper value from your cluster. Additional configuration parameters are available below.
+
+The `load` step only executes inserts into the datastore. After loading data, run the same workload to mix reads with writes.
+
+```
+bin/ycsb run googlebigtable -p columnfamily=cf -p google.bigtable.project.id=<PROJECT_ID> -p google.bigtable.cluster.name=<CLUSTER> -p google.bigtable.zone.name=<ZONE> -p google.bigtable.auth.service.account.enable=true -p google.bigtable.auth.json.keyfile=<PATH_TO_JSON_KEY> -jvm-args='-Xbootclasspath/p:<PATH_TO_ALPN_JAR>' -P workloads/workloada
+
+```
+
+## Configuration Options
+
+The following options can be configured using CLI (using the `-p` parameter) or hbase-site.xml (add the HBase config directory to YCSB's class path via CLI). Check the [Cloud Bigtable Client](https://github.com/GoogleCloudPlatform/cloud-bigtable-client) project for additional tuning parameters.
+
+* `columnfamily`: (Required) The Bigtable column family to target.
+* `google.bigtable.project.id`: (Required) The ID of a Bigtable project.
+* `google.bigtable.cluster.name`: (Required) The name of a Bigtable cluster.
+* `google.bigtable.zone.name`: (Required) Zone where the Bigtable cluster is running.
+* `google.bigtable.auth.service.account.enable`: Whether or not to authenticate with a service account. The default is true.
+* `google.bigtable.auth.json.keyfile`: (Required) A service account key for authentication.
+* `debug`: If true, prints debug information to standard out. The default is false.
+* `clientbuffering`: Whether or not to use client side buffering and batching of write operations. This can significantly improve performance and defaults to true.
diff --git a/googlebigtable/pom.xml b/googlebigtable/pom.xml
new file mode 100644
index 0000000000000000000000000000000000000000..5e70aa4481cd077ca06bc0d227436c6d4f5b7dea
--- /dev/null
+++ b/googlebigtable/pom.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2016 YCSB contributors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file except in compliance with the License. You
+may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License. See accompanying
+LICENSE file.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>com.yahoo.ycsb</groupId>
+    <artifactId>binding-parent</artifactId>
+    <version>0.9.0-SNAPSHOT</version>
+    <relativePath>../binding-parent/</relativePath>
+  </parent>
+
+  <artifactId>googlebigtable-binding</artifactId>
+  <name>Google Cloud Bigtable Binding</name>
+  <packaging>jar</packaging>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.cloud.bigtable</groupId>
+      <artifactId>bigtable-hbase-1.0</artifactId>
+      <version>${googlebigtable.version}</version>
+    </dependency>
+    
+    <dependency>
+      <groupId>com.yahoo.ycsb</groupId>
+      <artifactId>core</artifactId>
+      <version>${project.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    
+  </dependencies>
+</project>
\ No newline at end of file
diff --git a/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java b/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java
new file mode 100644
index 0000000000000000000000000000000000000000..86ab94eac7d74a9c85c5edb7f883fa9cbd5bcdb7
--- /dev/null
+++ b/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java
@@ -0,0 +1,443 @@
+/**
+ * Copyright (c) 2016 YCSB contributors. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+package com.yahoo.ycsb.db;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Properties;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.util.Set;
+import java.util.Vector;
+import java.util.concurrent.ExecutionException;
+
+import com.google.bigtable.repackaged.com.google.protobuf.ByteString;
+import com.google.bigtable.repackaged.com.google.protobuf.ServiceException;
+import com.google.bigtable.v1.Column;
+import com.google.bigtable.v1.Family;
+import com.google.bigtable.v1.MutateRowRequest;
+import com.google.bigtable.v1.Mutation;
+import com.google.bigtable.v1.ReadRowsRequest;
+import com.google.bigtable.v1.Row;
+import com.google.bigtable.v1.RowFilter;
+import com.google.bigtable.v1.RowRange;
+import com.google.bigtable.v1.Mutation.DeleteFromRow;
+import com.google.bigtable.v1.Mutation.SetCell;
+import com.google.bigtable.v1.RowFilter.Chain.Builder;
+import com.google.cloud.bigtable.config.BigtableOptions;
+import com.google.cloud.bigtable.grpc.BigtableDataClient;
+import com.google.cloud.bigtable.grpc.BigtableSession;
+import com.google.cloud.bigtable.grpc.async.AsyncExecutor;
+import com.google.cloud.bigtable.grpc.async.HeapSizeManager;
+import com.google.cloud.bigtable.hbase.BigtableOptionsFactory;
+import com.google.cloud.bigtable.util.ByteStringer;
+import com.yahoo.ycsb.ByteArrayByteIterator;
+import com.yahoo.ycsb.ByteIterator;
+import com.yahoo.ycsb.DBException;
+import com.yahoo.ycsb.Status;
+
+/**
+ * Google Bigtable Proto client for YCSB framework.
+ * 
+ * Bigtable offers two APIs. These include a native Protobuf GRPC API as well as 
+ * an HBase API wrapper for the GRPC API. This client implements the Protobuf 
+ * API to test the underlying calls wrapped up in the HBase API. To use the 
+ * HBase API, see the hbase10 client binding.
+ */
+public class GoogleBigtableClient extends com.yahoo.ycsb.DB {
+  public static final Charset UTF8_CHARSET = Charset.forName("UTF8");
+  
+  /** Property names for the CLI. */
+  private static final String ASYNC_MUTATOR_MAX_MEMORY = "mutatorMaxMemory";
+  private static final String ASYNC_MAX_INFLIGHT_RPCS = "mutatorMaxInflightRPCs";
+  private static final String CLIENT_SIDE_BUFFERING = "clientbuffering";
+  
+  /** Tracks running thread counts so we know when to close the session. */ 
+  private static int threadCount = 0;
+  
+  /** This will load the hbase-site.xml config file and/or store CLI options. */
+  private static final Configuration CONFIG = HBaseConfiguration.create();
+  
+  /** Print debug information to standard out. */
+  private boolean debug = false;
+  
+  /** Global Bigtable native API objects. */ 
+  private static BigtableOptions options;
+  private static BigtableSession session;
+  
+  /** Thread local Bigtable native API objects. */
+  private BigtableDataClient client;
+  private HeapSizeManager heapSizeManager;
+  private AsyncExecutor asyncExecutor;
+  
+  /** The column family used for the workload. */
+  private byte[] columnFamilyBytes;
+  
+  /** Cache for the last table name/ID to avoid byte conversions. */
+  private String lastTable = "";
+  private byte[] lastTableBytes;
+  
+  /**
+   * If true, buffer mutations on the client. For measuring insert/update/delete 
+   * latencies, client side buffering should be disabled.
+   */
+  private boolean clientSideBuffering = false;
+
+  @Override
+  public void init() throws DBException {
+    Properties props = getProperties();
+    
+    // Defaults the user can override if needed
+    CONFIG.set("google.bigtable.auth.service.account.enable", "true");
+    
+    // make it easy on ourselves by copying all CLI properties into the config object.
+    final Iterator<Entry<Object, Object>> it = props.entrySet().iterator();
+    while (it.hasNext()) {
+      Entry<Object, Object> entry = it.next();
+      CONFIG.set((String)entry.getKey(), (String)entry.getValue());
+    }
+    
+    clientSideBuffering = getProperties().getProperty(CLIENT_SIDE_BUFFERING, "false")
+        .equals("true") ? true : false;
+    
+    System.err.println("Running Google Bigtable with Proto API" +
+         (clientSideBuffering ? " and client side buffering." : "."));
+    
+    synchronized (CONFIG) {
+      ++threadCount;
+      if (session == null) {
+        try {
+          options = BigtableOptionsFactory.fromConfiguration(CONFIG);
+          session = new BigtableSession(options);
+          // important to instantiate the first client here, otherwise the
+          // other threads may receive an NPE from the options when they try
+          // to read the cluster name.
+          client = session.getDataClient();
+        } catch (IOException e) {
+          throw new DBException("Error loading options from config: ", e);
+        }
+      } else {
+        client = session.getDataClient();
+      }
+      
+      if (clientSideBuffering) {
+        heapSizeManager = new HeapSizeManager(
+            Long.parseLong(
+                getProperties().getProperty(ASYNC_MUTATOR_MAX_MEMORY, 
+                    Long.toString(AsyncExecutor.ASYNC_MUTATOR_MAX_MEMORY_DEFAULT))),
+            Integer.parseInt(
+                getProperties().getProperty(ASYNC_MAX_INFLIGHT_RPCS, 
+                    Integer.toString(AsyncExecutor.MAX_INFLIGHT_RPCS_DEFAULT))));
+        asyncExecutor = new AsyncExecutor(client, heapSizeManager);
+      }
+    }
+    
+    if ((getProperties().getProperty("debug") != null)
+        && (getProperties().getProperty("debug").compareTo("true") == 0)) {
+      debug = true;
+    }
+    
+    final String columnFamily = getProperties().getProperty("columnfamily");
+    if (columnFamily == null) {
+      System.err.println("Error, must specify a columnfamily for Bigtable table");
+      throw new DBException("No columnfamily specified");
+    }
+    columnFamilyBytes = Bytes.toBytes(columnFamily);
+  }
+  
+  @Override
+  public void cleanup() throws DBException {
+    if (asyncExecutor != null) {
+      try {
+        asyncExecutor.flush();
+      } catch (IOException e) {
+        throw new DBException(e);
+      }
+    }
+    synchronized (CONFIG) {
+      --threadCount;
+      if (threadCount <= 0) {
+        try {
+          session.close();
+        } catch (IOException e) {
+          throw new DBException(e);
+        }
+      }
+    }
+  }
+  
+  @Override
+  public Status read(String table, String key, Set<String> fields,
+      HashMap<String, ByteIterator> result) {
+    if (debug) {
+      System.out.println("Doing read from Bigtable columnfamily " 
+          + new String(columnFamilyBytes));
+      System.out.println("Doing read for key: " + key);
+    }
+    
+    setTable(table);
+    
+    RowFilter filter = RowFilter.newBuilder()
+        .setFamilyNameRegexFilterBytes(ByteStringer.wrap(columnFamilyBytes))
+        .build();
+    if (fields != null && fields.size() > 0) {
+      Builder filterChain = RowFilter.Chain.newBuilder();
+      filterChain.addFilters(filter);
+      filterChain.addFilters(RowFilter.newBuilder()
+          .setCellsPerColumnLimitFilter(1)
+          .build());
+      int count = 0;
+      // usually "field#" so pre-alloc
+      final StringBuilder regex = new StringBuilder(fields.size() * 6);
+      for (final String field : fields) {
+        if (count++ > 0) {
+          regex.append("|");
+        }
+        regex.append(field);
+      }
+      filterChain.addFilters(RowFilter.newBuilder()
+          .setColumnQualifierRegexFilter(
+              ByteStringer.wrap(regex.toString().getBytes()))).build();
+      filter = RowFilter.newBuilder().setChain(filterChain.build()).build();
+    }
+    
+    final ReadRowsRequest.Builder rrr = ReadRowsRequest.newBuilder()
+        .setTableNameBytes(ByteStringer.wrap(lastTableBytes))
+        .setFilter(filter)
+        .setRowKey(ByteStringer.wrap(key.getBytes()));
+    
+    List<Row> rows;
+    try {
+      rows = client.readRowsAsync(rrr.build()).get();
+      if (rows == null || rows.isEmpty()) {
+        return Status.NOT_FOUND;
+      }
+      for (final Row row : rows) {
+        for (final Family family : row.getFamiliesList()) {
+          if (Arrays.equals(family.getNameBytes().toByteArray(), columnFamilyBytes)) {
+            for (final Column column : family.getColumnsList()) {
+              // we should only have a single cell per column
+              result.put(column.getQualifier().toString(UTF8_CHARSET), 
+                  new ByteArrayByteIterator(column.getCells(0).getValue().toByteArray()));
+              if (debug) {
+                System.out.println(
+                    "Result for field: " + column.getQualifier().toString(UTF8_CHARSET)
+                        + " is: " + column.getCells(0).getValue().toString(UTF8_CHARSET));
+              }
+            }
+          }
+        }
+      }
+      
+      return Status.OK;
+    } catch (InterruptedException e) {
+      System.err.println("Interrupted during get: " + e);
+      Thread.currentThread().interrupt();
+      return Status.ERROR;
+    } catch (ExecutionException e) {
+      System.err.println("Exception during get: " + e);
+      return Status.ERROR;
+    }
+  }
+
+  @Override
+  public Status scan(String table, String startkey, int recordcount,
+      Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
+    setTable(table);
+    
+    RowFilter filter = RowFilter.newBuilder()
+        .setFamilyNameRegexFilterBytes(ByteStringer.wrap(columnFamilyBytes))
+        .build();
+    if (fields != null && fields.size() > 0) {
+      Builder filterChain = RowFilter.Chain.newBuilder();
+      filterChain.addFilters(filter);
+      filterChain.addFilters(RowFilter.newBuilder()
+          .setCellsPerColumnLimitFilter(1)
+          .build());
+      int count = 0;
+      // usually "field#" so pre-alloc
+      final StringBuilder regex = new StringBuilder(fields.size() * 6);
+      for (final String field : fields) {
+        if (count++ > 0) {
+          regex.append("|");
+        }
+        regex.append(field);
+      }
+      filterChain.addFilters(RowFilter.newBuilder()
+          .setColumnQualifierRegexFilter(
+              ByteStringer.wrap(regex.toString().getBytes()))).build();
+      filter = RowFilter.newBuilder().setChain(filterChain.build()).build();
+    }
+    
+    final RowRange range = RowRange.newBuilder()
+        .setStartKey(ByteStringer.wrap(startkey.getBytes()))
+        .build();
+    
+    final ReadRowsRequest.Builder rrr = ReadRowsRequest.newBuilder()
+        .setTableNameBytes(ByteStringer.wrap(lastTableBytes))
+        .setFilter(filter)
+        .setRowRange(range);
+    
+    List<Row> rows;
+    try {
+      rows = client.readRowsAsync(rrr.build()).get();
+      if (rows == null || rows.isEmpty()) {
+        return Status.NOT_FOUND;
+      }
+      int numResults = 0;
+      
+      for (final Row row : rows) {
+        final HashMap<String, ByteIterator> rowResult =
+            new HashMap<String, ByteIterator>(fields != null ? fields.size() : 10);
+        
+        for (final Family family : row.getFamiliesList()) {
+          if (Arrays.equals(family.getNameBytes().toByteArray(), columnFamilyBytes)) {
+            for (final Column column : family.getColumnsList()) {
+              // we should only have a single cell per column
+              rowResult.put(column.getQualifier().toString(UTF8_CHARSET), 
+                  new ByteArrayByteIterator(column.getCells(0).getValue().toByteArray()));
+              if (debug) {
+                System.out.println(
+                    "Result for field: " + column.getQualifier().toString(UTF8_CHARSET)
+                        + " is: " + column.getCells(0).getValue().toString(UTF8_CHARSET));
+              }
+            }
+          }
+        }
+        
+        result.add(rowResult);
+        
+        numResults++;
+        if (numResults >= recordcount) {// if hit recordcount, bail out
+          break;
+        }
+      }
+      return Status.OK;
+    } catch (InterruptedException e) {
+      System.err.println("Interrupted during scan: " + e);
+      Thread.currentThread().interrupt();
+      return Status.ERROR;
+    } catch (ExecutionException e) {
+      System.err.println("Exception during scan: " + e);
+      return Status.ERROR;
+    }
+  }
+
+  @Override
+  public Status update(String table, String key,
+      HashMap<String, ByteIterator> values) {
+    if (debug) {
+      System.out.println("Setting up put for key: " + key);
+    }
+    
+    setTable(table);
+    
+    final MutateRowRequest.Builder rowMutation = MutateRowRequest.newBuilder();
+    rowMutation.setRowKey(ByteString.copyFromUtf8(key));
+    rowMutation.setTableNameBytes(ByteStringer.wrap(lastTableBytes));
+    
+    for (final Entry<String, ByteIterator> entry : values.entrySet()) {
+      final Mutation.Builder mutationBuilder = rowMutation.addMutationsBuilder();
+      final SetCell.Builder setCellBuilder = mutationBuilder.getSetCellBuilder();
+      
+      setCellBuilder.setFamilyNameBytes(ByteStringer.wrap(columnFamilyBytes));
+      setCellBuilder.setColumnQualifier(ByteStringer.wrap(entry.getKey().getBytes()));
+      setCellBuilder.setValue(ByteStringer.wrap(entry.getValue().toArray()));
+      setCellBuilder.setTimestampMicros(System.nanoTime() / 1000);
+    }
+    
+    try {
+      if (clientSideBuffering) {
+        asyncExecutor.mutateRowAsync(rowMutation.build());
+      } else {
+        client.mutateRow(rowMutation.build());
+      }
+      return Status.OK;
+    } catch (ServiceException e) {
+      System.err.println("Failed to insert key: " + key + " " + e.getMessage());
+      return Status.ERROR;
+    } catch (InterruptedException e) {
+      System.err.println("Interrupted while inserting key: " + key + " " 
+          + e.getMessage());
+      Thread.currentThread().interrupt();
+      return Status.ERROR; // never get here, but let's make the compiler happy
+    }
+  }
+
+  @Override
+  public Status insert(String table, String key,
+      HashMap<String, ByteIterator> values) {
+    return update(table, key, values);
+  }
+
+  @Override
+  public Status delete(String table, String key) {
+    if (debug) {
+      System.out.println("Doing delete for key: " + key);
+    }
+    
+    setTable(table);
+    
+    final MutateRowRequest.Builder rowMutation = MutateRowRequest.newBuilder()
+        .setRowKey(ByteString.copyFromUtf8(key))
+        .setTableNameBytes(ByteStringer.wrap(lastTableBytes));
+    rowMutation.addMutationsBuilder().setDeleteFromRow(
+        DeleteFromRow.getDefaultInstance());
+    
+    try {
+      if (clientSideBuffering) {
+        asyncExecutor.mutateRowAsync(rowMutation.build());
+      } else {
+        client.mutateRow(rowMutation.build());
+      }
+      return Status.OK;
+    } catch (ServiceException e) {
+      System.err.println("Failed to delete key: " + key + " " + e.getMessage());
+      return Status.ERROR;
+    } catch (InterruptedException e) {
+      System.err.println("Interrupted while delete key: " + key + " " 
+          + e.getMessage());
+      Thread.currentThread().interrupt();
+      return Status.ERROR; // never get here, but let's make the compiler happy
+    }
+  }
+
+  /**
+   * Little helper to set the table byte array. If it's different than the last
+   * table we reset the byte array. Otherwise we just use the existing array.
+   * @param table The table we're operating against
+   */
+  private void setTable(final String table) {
+    if (!lastTable.equals(table)) {
+      lastTable = table;
+      lastTableBytes = options
+          .getClusterName()
+          .toTableName(table)
+          .toString()
+          .getBytes();
+    }
+  }
+  
+}
\ No newline at end of file
diff --git a/googlebigtable/src/main/java/com/yahoo/ycsb/db/package-info.java b/googlebigtable/src/main/java/com/yahoo/ycsb/db/package-info.java
new file mode 100644
index 0000000000000000000000000000000000000000..f0ab9e749b7ee9133db04737dcfe0986051ab745
--- /dev/null
+++ b/googlebigtable/src/main/java/com/yahoo/ycsb/db/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2016 YCSB contributors. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+
+/**
+ * The YCSB binding for Google's <a href="https://cloud.google.com/bigtable/">
+ * Bigtable</a>.
+ */
+package com.yahoo.ycsb.db;
diff --git a/googledatastore/pom.xml b/googledatastore/pom.xml
index beea713908bfcf81eba2dcdccd13a41403cebb2b..57db3505c288e6d27c309dbd19a0bcf1ba1fbd2e 100644
--- a/googledatastore/pom.xml
+++ b/googledatastore/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
diff --git a/hbase094/pom.xml b/hbase094/pom.xml
index 2622150d3ffd213c357cf8cc0ce9e7c1ecbb41e1..ca7d4c60f62525c841e16b1b98f947fedddfc4b5 100644
--- a/hbase094/pom.xml
+++ b/hbase094/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent/</relativePath>
   </parent>
 
diff --git a/hbase098/README.md b/hbase098/README.md
index e6a7fb4158f64c7d3c19e2c7e4c96a94a7cb612c..83c3c7a084743435736c88c6e840cc44e9829690 100644
--- a/hbase098/README.md
+++ b/hbase098/README.md
@@ -1,5 +1,5 @@
 <!--
-Copyright (c) 2015 YCSB contributors. All rights reserved.
+Copyright (c) 2015-2016 YCSB contributors. All rights reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License"); you
 may not use this file except in compliance with the License. You
@@ -77,3 +77,6 @@ Following options can be configurable using `-p`.
 * `principal`: If testing need to be done against a secure HBase cluster using Kerberos Keytab, 
   this property can be used to pass the principal in the keytab file.
 * `keytab`: The Kerberos keytab file name and location can be passed through this property.
+* `writebuffersize`: The maximum amount, in bytes, of data to buffer on the client side before a flush is forced. The default is 12MB.
+
+Additional HBase settings should be provided in the `hbase-site.xml` file located in your `/HBASE-HOME-DIR/conf` directory. Typically this will be `/etc/hbase/conf`.
\ No newline at end of file
diff --git a/hbase098/pom.xml b/hbase098/pom.xml
index 99f7805b9fba6992bbdf960fb4af6aa9f7811e9e..29600f7f28c5126af931f393d04ca8da04c051f6 100644
--- a/hbase098/pom.xml
+++ b/hbase098/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent/</relativePath>
   </parent>
 
diff --git a/hbase10/README.md b/hbase10/README.md
index 1da5bc4369aa552e2ae158c217fc11b044e8cff8..dd01249edbf169b6ab112af34c1a157dad4da067 100644
--- a/hbase10/README.md
+++ b/hbase10/README.md
@@ -1,5 +1,5 @@
 <!--
-Copyright (c) 2015 YCSB contributors. All rights reserved.
+Copyright (c) 2015-2016 YCSB contributors. All rights reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License"); you
 may not use this file except in compliance with the License. You
@@ -16,8 +16,101 @@ LICENSE file.
 -->
 
 # HBase (1.0.x) Driver for YCSB
-This driver is a binding for the YCSB facilities to operate against a HBase 1.0.x Server cluster.
+This driver is a binding for the YCSB facilities to operate against a HBase 1.0.x Server cluster or Google's hosted Bigtable.
 To run against an HBase 0.94.x cluster, use the `hbase094` binding.
 To run against an HBase 0.98.x cluster, use the `hbase098` binding.
 
-See `hbase098/README.md` for configuration details.
+See `hbase098/README.md` for a quickstart to setup HBase for load testing and common configuration details.
+
+## Configuration Options
+In addition to those options available for the `hbase098` binding, the following options are available for the `hbase10` binding:
+
+* `durability`: Whether or not writes should be appended to the WAL. Bypassing the WAL can improve throughput but data cannot be recovered in the event of a crash. The default is true.
+
+## Bigtable
+
+Google's Bigtable service provides an implementation of the HBase API for migrating existing applications. Users can perform load tests against Bigtable using this binding.
+
+### 1. Setup a Bigtable Cluster
+
+Login to the Google Cloud Console and follow the [Creating Cluster](https://cloud.google.com/bigtable/docs/creating-cluster) steps. Make a note of your cluster name, zone and project ID.
+
+### 2. Launch the Bigtable Shell
+
+From the Cloud Console, launch a shell and follow the [Quickstart](https://cloud.google.com/bigtable/docs/quickstart) up to step 4 where you launch the HBase shell.
+
+### 3. Create a Table
+
+For best results, use the pre-splitting strategy recommended in [HBASE-4163](https://issues.apache.org/jira/browse/HBASE-4163):
+
+```
+hbase(main):001:0> n_splits = 200 # HBase recommends (10 * number of regionservers)
+hbase(main):002:0> create 'usertable', 'cf', {SPLITS => (1..n_splits).map {|i| "user#{1000+i*(9999-1000)/n_splits}"}}
+```
+
+Make a note of the column family, in this example it's `cf`.
+
+### 4. Fetch the Proper ALPN Boot Jar
+
+The Bigtable protocol uses HTTP/2 which requires an ALPN protocol negotiation implementation. On JVM instantiation the implementation must be loaded before attempting to connect to the cluster. If you're using Java 7 or 8, use this [Jetty Version Table](http://www.eclipse.org/jetty/documentation/current/alpn-chapter.html#alpn-versions) to determine the version appropriate for your JVM. (ALPN is included in JDK 9+). Download the proper jar from [Maven](http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22org.mortbay.jetty.alpn%22%20AND%20a%3A%22alpn-boot%22) to a location on your system.
+
+### 5. Download the Bigtable Client Jar
+
+Download one of the `bigtable-hbase-1.#` jars from [Maven](http://search.maven.org/#search%7Cga%7C1%7Ccom.google.cloud.bigtable) to your host.
+
+### 6. Download JSON Credentials
+
+Follow these instructions for [Generating a JSON key](https://cloud.google.com/bigtable/docs/installing-hbase-shell#service-account) and save it to your host.
+
+### 7. Create or Edit hbase-site.xml
+
+If you have an existing HBase configuration directory with an `hbase-site.xml` file, edit the file as per below. If not, create a directory called `conf` under the `hbase10` directory. Create a file in the conf directory named `hbase-site.xml`. Provide the following settings in the XML file, making sure to replace the bracketed examples with the proper values from your Cloud console.
+
+```
+<configuration>
+  <property>
+    <name>hbase.client.connection.impl</name>
+    <value>com.google.cloud.bigtable.hbase1_0.BigtableConnection</value>
+  </property>
+  <property>
+    <name>google.bigtable.cluster.name</name>
+    <value>[YOUR-CLUSTER-ID]</value>
+  </property>
+  <property>
+    <name>google.bigtable.project.id</name>
+    <value>[YOUR-PROJECT-ID]</value>
+  </property>
+  <property>
+    <name>google.bigtable.zone.name</name>
+    <value>[YOUR-ZONE-NAME]</value>
+  </property>
+  <property>
+    <name>google.bigtable.auth.service.account.enable</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>google.bigtable.auth.json.keyfile</name>
+    <value>[PATH-TO-YOUR-KEY-FILE]</value>
+  </property>
+</configuration>
+```
+
+If you wish to try other API implementations (1.1.x or 1.2.x) change the `hbase.client.connection.impl` appropriately to match the JAR you downloaded.
+
+If you have an existing HBase config directory, make sure to add it to the class path via `-cp <PATH_TO_BIGTABLE_JAR>:<CONF_DIR>`.
+
+### 8. Execute a Workload
+
+Switch to the root of the YCSB repo and choose the workload you want to run and `load` it first. With the CLI you must provide the column family, cluster properties and the ALPN jar to load.
+
+```
+bin/ycsb load hbase10 -p columnfamily=cf -cp <PATH_TO_BIGTABLE_JAR> -jvm-args='-Xbootclasspath/p:<PATH_TO_ALPN_JAR>' -P workloads/workloada
+
+```
+
+The `load` step only executes inserts into the datastore. After loading data, run the same workload to mix reads with writes.
+
+```
+bin/ycsb run hbase10 -p columnfamily=cf -cp <PATH_TO_BIGTABLE_JAR> -jvm-args='-Xbootclasspath/p:<PATH_TO_ALPN_JAR>' -P workloads/workloada
+
+```
\ No newline at end of file
diff --git a/hbase10/pom.xml b/hbase10/pom.xml
index 5d782adf55ae68e57881502efcda0f5592d99716..3f6bec078debc77aaec15380ed744f326b2d5b30 100644
--- a/hbase10/pom.xml
+++ b/hbase10/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent/</relativePath>
   </parent>
 
diff --git a/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java b/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java
index a41c1987325afef791e0cc5b5bfa37f9aa737bef..da72f4f86c6d0101defde14c7548c49ea887f213 100644
--- a/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java
+++ b/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java
@@ -51,6 +51,7 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 import java.util.Vector;
+import java.util.concurrent.atomic.AtomicInteger;
 
 /**
  * HBase 1.0 client for YCSB framework.
@@ -63,14 +64,22 @@ import java.util.Vector;
  */
 public class HBaseClient10 extends com.yahoo.ycsb.DB {
   private Configuration config = HBaseConfiguration.create();
-  
-  // Must be an object for synchronization and tracking running thread counts. 
-  private static Integer threadCount = 0;
+
+  private static AtomicInteger threadCount = new AtomicInteger(0);
 
   private boolean debug = false;
 
   private String tableName = "";
+
+  /**
+   * A Cluster Connection instance that is shared by all running ycsb threads.
+   * Needs to be initialized late so we pick up command-line configs if any.
+   * To ensure one instance only in a multi-threaded context, guard access
+   * with a 'lock' object.
+   * @see #CONNECTION_LOCK
+   */
   private static Connection connection = null;
+  private static final Object CONNECTION_LOCK = new Object();
 
   // Depending on the value of clientSideBuffering, either bufferedMutator
   // (clientSideBuffering) or currentTable (!clientSideBuffering) will be used.
@@ -121,10 +130,10 @@ public class HBaseClient10 extends com.yahoo.ycsb.DB {
       UserGroupInformation.setConfiguration(config);
     }
 
-    if ((getProperties().getProperty("principal")!=null) 
+    if ((getProperties().getProperty("principal")!=null)
         && (getProperties().getProperty("keytab")!=null)) {
       try {
-        UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"), 
+        UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"),
               getProperties().getProperty("keytab"));
       } catch (IOException e) {
         System.err.println("Keytab file is not readable or not found");
@@ -133,9 +142,10 @@ public class HBaseClient10 extends com.yahoo.ycsb.DB {
     }
 
     try {
-      synchronized(threadCount) {
-        ++threadCount;
+      threadCount.getAndIncrement();
+      synchronized (CONNECTION_LOCK) {
         if (connection == null) {
+          // Initialize if not set up already.
           connection = ConnectionFactory.createConnection(config);
         }
       }
@@ -166,7 +176,9 @@ public class HBaseClient10 extends com.yahoo.ycsb.DB {
     String table = com.yahoo.ycsb.workloads.CoreWorkload.table;
     try {
       final TableName tName = TableName.valueOf(table);
-      connection.getTable(tName).getTableDescriptor();
+      synchronized (CONNECTION_LOCK) {
+        connection.getTable(tName).getTableDescriptor();
+      }
     } catch (IOException e) {
       throw new DBException(e);
     }
@@ -193,11 +205,14 @@ public class HBaseClient10 extends com.yahoo.ycsb.DB {
       long en = System.nanoTime();
       final String type = clientSideBuffering ? "UPDATE" : "CLEANUP";
       measurements.measure(type, (int) ((en - st) / 1000));
-      synchronized(threadCount) {
-        --threadCount;
-        if (threadCount <= 0 && connection != null) {
-          connection.close();
-          connection = null;
+      threadCount.decrementAndGet();
+      if (threadCount.get() <= 0) {
+        // Means we are done so ok to shut down the Connection.
+        synchronized (CONNECTION_LOCK) {
+          if (connection != null) {
+            connection.close();
+            connection = null;
+          }
         }
       }
     } catch (IOException e) {
@@ -207,14 +222,13 @@ public class HBaseClient10 extends com.yahoo.ycsb.DB {
 
   public void getHTable(String table) throws IOException {
     final TableName tName = TableName.valueOf(table);
-    this.currentTable = this.connection.getTable(tName);
-    // suggestions from
-    // http://ryantwopointoh.blogspot.com/2009/01/
-    // performance-of-hbase-importing.html
-    if (clientSideBuffering) {
-      final BufferedMutatorParams p = new BufferedMutatorParams(tName);
-      p.writeBufferSize(writeBufferSize);
-      this.bufferedMutator = this.connection.getBufferedMutator(p);
+    synchronized (CONNECTION_LOCK) {
+      this.currentTable = connection.getTable(tName);
+      if (clientSideBuffering) {
+        final BufferedMutatorParams p = new BufferedMutatorParams(tName);
+        p.writeBufferSize(writeBufferSize);
+        this.bufferedMutator = connection.getBufferedMutator(p);
+      }
     }
   }
 
diff --git a/hypertable/pom.xml b/hypertable/pom.xml
index 8348fc0ba2d5f3405f0be4ca8860a3c00b145ba5..6c457f72777deb576ef40cf2dc160206d846b94f 100644
--- a/hypertable/pom.xml
+++ b/hypertable/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/infinispan/pom.xml b/infinispan/pom.xml
index d191a0e97a6b39d5812fbe9aa3d68217238e8db2..943b1937e0b7051508905fdf31ab90aa6c04b3a8 100644
--- a/infinispan/pom.xml
+++ b/infinispan/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/jdbc/pom.xml b/jdbc/pom.xml
index 3caee4733ca9dea078ac6fc4e83e42f1f4a12617..6777033698fdb5393eb47661694c4e1043c4d29c 100644
--- a/jdbc/pom.xml
+++ b/jdbc/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/kudu/pom.xml b/kudu/pom.xml
index 867513d93b0cd62cdae21567d64374c962a40f23..f0d3088c06b67e88170ffa31fdf9c8136e0d4d53 100644
--- a/kudu/pom.xml
+++ b/kudu/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
diff --git a/mapkeeper/pom.xml b/mapkeeper/pom.xml
index 6786b96c94804c655400430b906d346e1abb7e8f..6fac3795c72b15599b2b848c803ea285efe8bc5c 100644
--- a/mapkeeper/pom.xml
+++ b/mapkeeper/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/memcached/pom.xml b/memcached/pom.xml
index 706b406135a397a0a95fa91466b97f3571656768..10bcbbe9467d1341d951dd0ab6265702097feccb 100644
--- a/memcached/pom.xml
+++ b/memcached/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
diff --git a/mongodb/pom.xml b/mongodb/pom.xml
index ad287ac260857cc69e54d50ba3109eeeaf29fc65..f510d19aa272acceb9737be103c95f8e3f295ef2 100644
--- a/mongodb/pom.xml
+++ b/mongodb/pom.xml
@@ -22,7 +22,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
diff --git a/nosqldb/pom.xml b/nosqldb/pom.xml
index 9398ea408a59bedc60f53a92abeb7ed21fc67638..e2823102e4452796ffd6402efbf11aebde97c78c 100644
--- a/nosqldb/pom.xml
+++ b/nosqldb/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/orientdb/pom.xml b/orientdb/pom.xml
index 3a8302cc68fb9ea112c13715d1f5ae0e12cd162b..db83a942d6955e12374d3b6055f6c7843c0a7e66 100644
--- a/orientdb/pom.xml
+++ b/orientdb/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
diff --git a/pom.xml b/pom.xml
index 35105dade94c94bbb455baf749db0593f45be03a..8261e9b32d084fe0a1ff04ed06bf97604bf71fa9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
 
   <groupId>com.yahoo.ycsb</groupId>
   <artifactId>root</artifactId>
-  <version>0.8.0-SNAPSHOT</version>
+  <version>0.9.0-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <name>YCSB Root</name>
@@ -74,8 +74,9 @@ LICENSE file.
     <accumulo.version>1.6.0</accumulo.version>
     <cassandra.version>1.2.9</cassandra.version>
     <cassandra.cql.version>1.0.3</cassandra.cql.version>
-    <cassandra2.cql.version>2.1.8</cassandra2.cql.version>
+    <cassandra2.cql.version>3.0.0</cassandra2.cql.version>
     <geode.version>1.0.0-incubating.M1</geode.version>
+    <googlebigtable.version>0.2.3</googlebigtable.version>
     <infinispan.version>7.2.2.Final</infinispan.version>
     <kudu.version>0.6.0</kudu.version>
     <openjpa.jdbc.version>2.1.1</openjpa.jdbc.version>
@@ -90,6 +91,7 @@ LICENSE file.
     <thrift.version>0.8.0</thrift.version>
     <hypertable.version>0.9.5.6</hypertable.version>
     <couchbase.version>1.4.10</couchbase.version>
+    <couchbase2.version>2.2.6</couchbase2.version>
     <tarantool.version>1.6.5</tarantool.version>
     <aerospike.version>3.1.2</aerospike.version>
     <solr.version>5.4.0</solr.version>
@@ -105,10 +107,12 @@ LICENSE file.
     <module>cassandra</module>
     <module>cassandra2</module>
     <module>couchbase</module>
+    <module>couchbase2</module>
     <module>distribution</module>
     <module>dynamodb</module>
     <module>elasticsearch</module>
     <module>geode</module>
+    <module>googlebigtable</module>
     <module>googledatastore</module>
     <module>hbase094</module>
     <module>hbase098</module>
diff --git a/redis/pom.xml b/redis/pom.xml
index a958a0ecbf2fa0a6582e40e79a68aee1d1bd174e..65dbc50bf928e8e41333a48472d2f43f530f2fd5 100644
--- a/redis/pom.xml
+++ b/redis/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/s3/pom.xml b/s3/pom.xml
index 44a288c4271df2c19a638bedaae9bf3d82ca967e..d5726a4621ffd67093a47c1498a0d6ac47758d32 100644
--- a/s3/pom.xml
+++ b/s3/pom.xml
@@ -19,7 +19,7 @@ LICENSE file.
     <parent>
         <groupId>com.yahoo.ycsb</groupId>
         <artifactId>binding-parent</artifactId>
-        <version>0.8.0-SNAPSHOT</version>
+        <version>0.9.0-SNAPSHOT</version>
         <relativePath>../binding-parent</relativePath>
     </parent>
   
diff --git a/solr/pom.xml b/solr/pom.xml
index 30929e88532c5a669ad56dfa86bfe1fa6f00cfb4..8253ea02dbf57303494a58cf09010da762cae15e 100644
--- a/solr/pom.xml
+++ b/solr/pom.xml
@@ -23,7 +23,7 @@ LICENSE file.
 	<parent>
 		<groupId>com.yahoo.ycsb</groupId>
 		<artifactId>binding-parent</artifactId>
-		<version>0.8.0-SNAPSHOT</version>
+		<version>0.9.0-SNAPSHOT</version>
 		<relativePath>../binding-parent</relativePath>
 	</parent>
 
diff --git a/tarantool/pom.xml b/tarantool/pom.xml
index b8818f586facfd87a50ac4266a6cafeeb620332a..acaea4ffc824e3640fe8afb08a415ac63cf2f07b 100644
--- a/tarantool/pom.xml
+++ b/tarantool/pom.xml
@@ -22,7 +22,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent/</relativePath>
   </parent>
 
diff --git a/voldemort/pom.xml b/voldemort/pom.xml
index f79f06e768cad69d7b5ddd4890e94989ce6b0698..f891a65a9f8b1e886c7a40fa64747a5d3f363c9e 100644
--- a/voldemort/pom.xml
+++ b/voldemort/pom.xml
@@ -22,7 +22,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.8.0-SNAPSHOT</version>
+    <version>0.9.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
    </parent>