diff --git a/.travis.yml b/.travis.yml
index aef900b74f1bd2a2303ff913688dba8bb54243b2..a34a295a70c7aab459368db2979a121ccc86e205 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -20,9 +20,10 @@
 language: java
 
 jdk:
+  - oraclejdk8
   - oraclejdk7
-  - openjdk7  
-  
+  - openjdk7
+
 install: mvn install -q -DskipTests=true
 
 script: mvn test -q
@@ -30,7 +31,8 @@ script: mvn test -q
 # Services to start for tests.
 services:
   - mongodb
+  - riak
 
 
 # Use the Container based infrastructure.
-sudo: false
+sudo: false
\ No newline at end of file
diff --git a/README.md b/README.md
index 834a6215ddfd44aae72f4ef30551050c9cd7ba68..020d64fa63a602a9140fa274cc9f4e22d1fb56e1 100644
--- a/README.md
+++ b/README.md
@@ -32,9 +32,9 @@ Getting Started
 1. Download the [latest release of YCSB](https://github.com/brianfrankcooper/YCSB/releases/latest):
 
     ```sh
-    curl -O --location https://github.com/brianfrankcooper/YCSB/releases/download/0.7.0/ycsb-0.7.0.tar.gz
-    tar xfvz ycsb-0.7.0.tar.gz
-    cd ycsb-0.7.0
+    curl -O --location https://github.com/brianfrankcooper/YCSB/releases/download/0.9.0/ycsb-0.9.0.tar.gz
+    tar xfvz ycsb-0.9.0.tar.gz
+    cd ycsb-0.9.0
     ```
     
 2. Set up a database to benchmark. There is a README file under each binding 
diff --git a/accumulo/pom.xml b/accumulo/pom.xml
index 08da0af6908b55296a6d3653607d49aad40d09f9..5a71fe2ad6a35a7868a1b490b685cb3bf1a4dd12 100644
--- a/accumulo/pom.xml
+++ b/accumulo/pom.xml
@@ -22,7 +22,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   <artifactId>accumulo-binding</artifactId>
diff --git a/aerospike/pom.xml b/aerospike/pom.xml
index 97a3213e74b433778aa0de2a607be3ee62178fa2..46a5b52a6713795c8bd5d8e6e95e3b4a12871e97 100644
--- a/aerospike/pom.xml
+++ b/aerospike/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
diff --git a/asynchbase/README.md b/asynchbase/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1a300c9bd2358e7aec03d09f0c51d36232b039b2
--- /dev/null
+++ b/asynchbase/README.md
@@ -0,0 +1,59 @@
+<!--
+Copyright (c) 2016 YCSB contributors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file except in compliance with the License. You
+may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License. See accompanying
+LICENSE file.
+-->
+
+# AsyncHBase Driver for YCSB
+
+This driver provides a YCSB workload binding for Apache HBase using AsyncHBase, an alternative to the stock HBase client. AsyncHBase is completely asynchronous for all operations and is particularly useful for write-heavy workloads. Note that it implements only a subset of the HBase client APIs, but it is compatible with all publicly released versions of HBase.
+
+## Quickstart
+
+### 1. Set up HBase
+
+Follow directions 1 to 3 from the ``hbase098`` binding's README.
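+
+For a quick local test, creating the target table from the HBase shell looks roughly like this (a sketch assuming the default CoreWorkload table name `usertable` and the column family `cf` used in the examples below):
+
+```
+hbase(main):001:0> create 'usertable', 'cf'
+```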
+
+### 2. Load a Workload
+
+Switch to the root of the YCSB repo, choose the workload you want to run, and `load` it first. If HBase is running on localhost, the CLI requires the column family at a minimum; otherwise you must also provide connection properties via the CLI or the path to a config file. Additional configuration parameters are listed below.
+
+```
+bin/ycsb load asynchbase -p columnfamily=cf -P workloads/workloada
+```
+
+The `load` step only executes inserts into the datastore. After loading data, run the same workload to mix reads with writes.
+
+```
+bin/ycsb run asynchbase -p columnfamily=cf -P workloads/workloada
+```
+
+## Configuration Options
+
+The following options can be configured via the CLI (using the `-p` parameter) or via a Java-style properties configuration file. See the [AsyncHBase Configuration](http://opentsdb.github.io/asynchbase/docs/build/html/configuration.html) documentation for additional tuning parameters.
+
+* `columnfamily`: (Required) The column family to target.
+* `config`: Optional full path to a configuration file with AsyncHBase options.
+* `hbase.zookeeper.quorum`: ZooKeeper quorum list.
+* `hbase.zookeeper.znode.parent`: Path used by HBase in ZooKeeper. Default is "/hbase".
+* `debug`: If true, prints debug information to standard out. The default is false.
+* `clientbuffering`: Whether or not to use client-side buffering and batching of write operations. This can significantly improve performance for write-heavy workloads. The default is false.
+* `durability`: When set to false, writes and deletes bypass the WAL for quicker responses. Default is true.
+* `jointimeout`: A timeout value, in milliseconds, for waiting on operations synchronously before an error is thrown. Defaults to 30000.
+* `prefetchmeta`: Whether or not to read meta for all regions in the table and connect to the proper region servers before starting operations. Defaults to false.
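+
+For example, a write-heavy run against a remote cluster with client-side buffering enabled might combine a properties file with CLI overrides like this (a sketch; the ZooKeeper host and the `asynchbase.properties` file name are placeholders):
+
+```
+# asynchbase.properties
+hbase.zookeeper.quorum = zkhost.example.com:2181
+clientbuffering = true
+```
+
+```
+bin/ycsb run asynchbase -p columnfamily=cf -P asynchbase.properties -P workloads/workloada
+```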
+
+
+Note: This module includes some Google Guava source files from version 12 that were removed in later releases but are still required by HBase's test modules for setting up the mini cluster during integration testing.
\ No newline at end of file
diff --git a/asynchbase/pom.xml b/asynchbase/pom.xml
new file mode 100644
index 0000000000000000000000000000000000000000..4162e26579374c37f925725694d44605dd7e85a1
--- /dev/null
+++ b/asynchbase/pom.xml
@@ -0,0 +1,105 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2016 YCSB contributors. All rights reserved.
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file except in compliance with the License. You
+may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License. See accompanying
+LICENSE file.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>com.yahoo.ycsb</groupId>
+    <artifactId>binding-parent</artifactId>
+    <version>0.10.0-SNAPSHOT</version>
+    <relativePath>../binding-parent/</relativePath>
+  </parent>
+
+  <artifactId>asynchbase-binding</artifactId>
+  <name>AsyncHBase Client Binding for Apache HBase</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.hbase</groupId>
+      <artifactId>asynchbase</artifactId>
+      <version>${asynchbase.version}</version>
+    </dependency>
+    
+    <dependency>
+      <groupId>com.yahoo.ycsb</groupId>
+      <artifactId>core</artifactId>
+      <version>${project.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+      <version>3.4.5</version>
+      <exclusions>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>jline</groupId>
+          <artifactId>jline</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.jboss.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>4.12</version>
+      <scope>test</scope>
+    </dependency>
+    
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-testing-util</artifactId>
+      <version>${hbase10.version}</version>
+      <scope>test</scope>
+    </dependency>
+    
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+      <version>${hbase10.version}</version>
+      <scope>test</scope>
+    </dependency>
+    
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <version>1.2.17</version>
+      <scope>test</scope>
+    </dependency>
+    
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>log4j-over-slf4j</artifactId>
+      <version>1.7.7</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+</project>
\ No newline at end of file
diff --git a/asynchbase/src/main/java/com/yahoo/ycsb/db/AsyncHBaseClient.java b/asynchbase/src/main/java/com/yahoo/ycsb/db/AsyncHBaseClient.java
new file mode 100644
index 0000000000000000000000000000000000000000..fddd1a7aef3b9be60178d7d36cb1852b547b8086
--- /dev/null
+++ b/asynchbase/src/main/java/com/yahoo/ycsb/db/AsyncHBaseClient.java
@@ -0,0 +1,409 @@
+/**
+ * Copyright (c) 2016 YCSB contributors. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+package com.yahoo.ycsb.db;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.Vector;
+
+import org.hbase.async.Bytes;
+import org.hbase.async.Config;
+import org.hbase.async.DeleteRequest;
+import org.hbase.async.GetRequest;
+import org.hbase.async.HBaseClient;
+import org.hbase.async.KeyValue;
+import org.hbase.async.PutRequest;
+import org.hbase.async.Scanner;
+
+import com.yahoo.ycsb.ByteArrayByteIterator;
+import com.yahoo.ycsb.ByteIterator;
+import com.yahoo.ycsb.DBException;
+import com.yahoo.ycsb.Status;
+
+/**
+ * Alternative Java client for Apache HBase.
+ * 
+ * This client provides a subset of the main HBase client and uses a completely
+ * asynchronous pipeline for all calls. It is particularly useful for write-heavy
+ * workloads. It is also compatible with all production versions of HBase. 
+ */
+public class AsyncHBaseClient extends com.yahoo.ycsb.DB {
+  public static final Charset UTF8_CHARSET = Charset.forName("UTF8");
+  private static final String CLIENT_SIDE_BUFFERING_PROPERTY = "clientbuffering";
+  private static final String DURABILITY_PROPERTY = "durability";
+  private static final String PREFETCH_META_PROPERTY = "prefetchmeta";
+  private static final String CONFIG_PROPERTY = "config";
+  private static final String COLUMN_FAMILY_PROPERTY = "columnfamily";
+  private static final String JOIN_TIMEOUT_PROPERTY = "jointimeout";
+  private static final String JOIN_TIMEOUT_PROPERTY_DEFAULT = "30000";
+  
+  /** Mutex for instantiating a single instance of the client. */
+  private static final Object MUTEX = new Object();
+  
+  /** Used for tracking running thread counts so we know when to shut down the client. */
+  private static int threadCount = 0;
+  
+  /** The client that's used for all threads. */
+  private static HBaseClient client;
+  
+  /** Print debug information to standard out. */
+  private boolean debug = false;
+  
+  /** The column family used for the workload. */
+  private byte[] columnFamilyBytes;
+  
+  /** Cache for the last table name/ID to avoid byte conversions. */
+  private String lastTable = "";
+  private byte[] lastTableBytes;
+  
+  private long joinTimeout;
+  
+  /** Whether or not to bypass the WAL for puts and deletes. */
+  private boolean durability = true;
+  
+  /**
+   * If true, buffer mutations on the client. This is the default behavior for
+   * AsyncHBase. For measuring insert/update/delete latencies, client-side
+   * buffering should be disabled.
+   * 
+   * A single instance of the underlying HBase client is shared by all
+   * client threads.
+   */
+  private boolean clientSideBuffering = false;
+  
+  @Override
+  public void init() throws DBException {
+    if (getProperties().getProperty(CLIENT_SIDE_BUFFERING_PROPERTY, "false")
+        .toLowerCase().equals("true")) {
+      clientSideBuffering = true;
+    }
+    if (getProperties().getProperty(DURABILITY_PROPERTY, "true")
+        .toLowerCase().equals("false")) {
+      durability = false;
+    }
+    final String columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY);
+    if (columnFamily == null || columnFamily.isEmpty()) {
+      System.err.println("Error, must specify a columnfamily for HBase table");
+      throw new DBException("No columnfamily specified");
+    }
+    columnFamilyBytes = columnFamily.getBytes();
+    
+    if ((getProperties().getProperty("debug") != null)
+        && (getProperties().getProperty("debug").compareTo("true") == 0)) {
+      debug = true;
+    }
+    
+    joinTimeout = Integer.parseInt(getProperties().getProperty(
+        JOIN_TIMEOUT_PROPERTY, JOIN_TIMEOUT_PROPERTY_DEFAULT));
+    
+    final boolean prefetchMeta = getProperties()
+        .getProperty(PREFETCH_META_PROPERTY, "false")
+        .toLowerCase().equals("true");
+    try {
+      synchronized (MUTEX) {
+        ++threadCount;
+        if (client == null) {
+          final String configPath = getProperties().getProperty(CONFIG_PROPERTY);
+          final Config config;
+          if (configPath == null || configPath.isEmpty()) {
+            config = new Config();
+            final Iterator<Entry<Object, Object>> iterator = getProperties()
+                 .entrySet().iterator();
+            while (iterator.hasNext()) {
+              final Entry<Object, Object> property = iterator.next();
+              config.overrideConfig((String)property.getKey(), 
+                  (String)property.getValue());
+            }
+          } else {
+            config = new Config(configPath);
+          }
+          client = new HBaseClient(config);
+          
+          // Terminate right now if table does not exist, since the client
+          // will not propagate this error upstream once the workload
+          // starts.
+          String table = com.yahoo.ycsb.workloads.CoreWorkload.table;
+          try {
+            client.ensureTableExists(table).join(joinTimeout);
+          } catch (InterruptedException e1) {
+            Thread.currentThread().interrupt();
+          } catch (Exception e) {
+            throw new DBException(e);
+          }
+          
+          if (prefetchMeta) {
+            try {
+              if (debug) {
+                System.out.println("Starting meta prefetch for table " + table);
+              }
+              client.prefetchMeta(table).join(joinTimeout);
+              if (debug) {
+                System.out.println("Completed meta prefetch for table " + table);
+              }
+            } catch (InterruptedException e) {
+              System.err.println("Interrupted during prefetch");
+              Thread.currentThread().interrupt();
+            } catch (Exception e) {
+              throw new DBException("Failed prefetch", e);
+            }
+          }
+        }
+      }
+    } catch (IOException e) {
+      throw new DBException("Failed instantiation of client", e);
+    }
+  }
+  
+  @Override
+  public void cleanup() throws DBException {
+    synchronized (MUTEX) {
+      --threadCount;
+      if (client != null && threadCount < 1) {
+        try {
+          if (debug) {
+            System.out.println("Shutting down client");
+          }
+          client.shutdown().joinUninterruptibly(joinTimeout);
+        } catch (Exception e) {
+          System.err.println("Failed to shutdown the AsyncHBase client "
+              + "properly: " + e.getMessage());
+        }
+        client = null;
+      }
+    }
+  }
+  
+  @Override
+  public Status read(String table, String key, Set<String> fields,
+      HashMap<String, ByteIterator> result) {
+    setTable(table);
+    
+    final GetRequest get = new GetRequest(
+        lastTableBytes, key.getBytes(), columnFamilyBytes);
+    if (fields != null) {
+      get.qualifiers(getQualifierList(fields));
+    }
+    
+    try {
+      if (debug) {
+        System.out.println("Doing read from HBase columnfamily " + 
+            Bytes.pretty(columnFamilyBytes));
+        System.out.println("Doing read for key: " + key);
+      }
+      
+      final ArrayList<KeyValue> row = client.get(get).join(joinTimeout);
+      if (row == null || row.isEmpty()) {
+        return Status.NOT_FOUND;
+      }
+      
+      // got something so populate the results
+      for (final KeyValue column : row) {
+        result.put(new String(column.qualifier()), 
+            // TODO - do we need to clone this array? YCSB may keep it in memory
+            // for a while which would mean the entire KV would hang out and won't
+            // be GC'd.
+            new ByteArrayByteIterator(column.value()));
+        
+        if (debug) {
+          System.out.println(
+              "Result for field: " + Bytes.pretty(column.qualifier())
+                  + " is: " + Bytes.pretty(column.value()));
+        }
+      }
+      return Status.OK;
+    } catch (InterruptedException e) {
+      System.err.println("Thread interrupted");
+      Thread.currentThread().interrupt();
+    } catch (Exception e) {
+      System.err.println("Failure reading from row with key " + key + 
+          ": " + e.getMessage());
+      return Status.ERROR;
+    }
+    return Status.ERROR;
+  }
+
+  @Override
+  public Status scan(String table, String startkey, int recordcount,
+      Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
+    setTable(table);
+    
+    final Scanner scanner = client.newScanner(lastTableBytes);
+    scanner.setFamily(columnFamilyBytes);
+    scanner.setStartKey(startkey.getBytes(UTF8_CHARSET));
+    // No end key... *sniff*
+    if (fields != null) {
+      scanner.setQualifiers(getQualifierList(fields));
+    }
+    
+    // no filters? *sniff*
+    ArrayList<ArrayList<KeyValue>> rows = null;
+    try {
+      int numResults = 0;
+      while ((rows = scanner.nextRows().join(joinTimeout)) != null) {
+        for (final ArrayList<KeyValue> row : rows) {
+          final HashMap<String, ByteIterator> rowResult =
+              new HashMap<String, ByteIterator>(row.size());
+          for (final KeyValue column : row) {
+            rowResult.put(new String(column.qualifier()), 
+                // TODO - do we need to clone this array? YCSB may keep it in memory
+                // for a while which would mean the entire KV would hang out and won't
+                // be GC'd.
+                new ByteArrayByteIterator(column.value()));
+            if (debug) {
+              System.out.println("Got scan result for key: " + 
+                  Bytes.pretty(column.key()));
+            }
+          }
+          result.add(rowResult);
+          numResults++;
+
+          if (numResults >= recordcount) { // if hit recordcount, bail out
+            break;
+          }
+        }
+        if (numResults >= recordcount) { // also break out of the fetch loop
+          break;
+        }
+      }
+      scanner.close().join(joinTimeout);
+      return Status.OK;
+    } catch (InterruptedException e) {
+      System.err.println("Thread interrupted");
+      Thread.currentThread().interrupt();
+    } catch (Exception e) {
+      System.err.println("Failure reading from row with key " + startkey + 
+          ": " + e.getMessage());
+      return Status.ERROR;
+    }
+    
+    return Status.ERROR;
+  }
+
+  @Override
+  public Status update(String table, String key,
+      HashMap<String, ByteIterator> values) {
+    setTable(table);
+    
+    if (debug) {
+      System.out.println("Setting up put for key: " + key);
+    }
+    
+    final byte[][] qualifiers = new byte[values.size()][];
+    final byte[][] byteValues = new byte[values.size()][];
+    
+    int idx = 0;
+    for (final Entry<String, ByteIterator> entry : values.entrySet()) {
+      qualifiers[idx] = entry.getKey().getBytes();
+      byteValues[idx++] = entry.getValue().toArray();
+      if (debug) {
+        System.out.println("Adding field/value " + entry.getKey() + "/"
+            + Bytes.pretty(entry.getValue().toArray()) + " to put request");
+      }
+    }
+    
+    final PutRequest put = new PutRequest(lastTableBytes, key.getBytes(), 
+        columnFamilyBytes, qualifiers, byteValues);
+    if (!durability) {
+      put.setDurable(false);
+    }
+    if (!clientSideBuffering) {
+      put.setBufferable(false);
+      try {
+        client.put(put).join(joinTimeout);
+      } catch (InterruptedException e) {
+        System.err.println("Thread interrupted");
+        Thread.currentThread().interrupt();
+      } catch (Exception e) {
+        System.err.println("Failure reading from row with key " + key + 
+            ": " + e.getMessage());
+        return Status.ERROR;
+      }
+    } else {
+      // hooray! Asynchronous write. But without a callback and an async
+      // YCSB call we don't know whether it succeeded or not
+      client.put(put);
+    }
+    
+    return Status.OK;
+  }
+
+  @Override
+  public Status insert(String table, String key,
+      HashMap<String, ByteIterator> values) {
+    return update(table, key, values);
+  }
+
+  @Override
+  public Status delete(String table, String key) {
+    setTable(table);
+    
+    if (debug) {
+      System.out.println("Doing delete for key: " + key);
+    }
+    
+    final DeleteRequest delete = new DeleteRequest(
+        lastTableBytes, key.getBytes(), columnFamilyBytes);
+    if (!durability) {
+      delete.setDurable(false);
+    }
+    if (!clientSideBuffering) {
+      delete.setBufferable(false);
+      try {
+        client.delete(delete).join(joinTimeout);
+      } catch (InterruptedException e) {
+        System.err.println("Thread interrupted");
+        Thread.currentThread().interrupt();
+      } catch (Exception e) {
+        System.err.println("Failure reading from row with key " + key + 
+            ": " + e.getMessage());
+        return Status.ERROR;
+      }
+    } else {
+      // hooray! Asynchronous write. But without a callback and an async
+      // YCSB call we don't know whether it succeeded or not
+      client.delete(delete);
+    }
+    return Status.OK;
+  }
+
+  /**
+   * Little helper to set the table byte array. If it's different from the last
+   * table, we reset the byte array. Otherwise we just use the existing array.
+   * @param table The table we're operating against
+   */
+  private void setTable(final String table) {
+    if (!lastTable.equals(table)) {
+      lastTable = table;
+      lastTableBytes = table.getBytes();
+    }
+  }
+  
+  /**
+   * Little helper to build a qualifier byte array from a field set.
+   * @param fields The fields to fetch.
+   * @return The column qualifier byte arrays.
+   */
+  private byte[][] getQualifierList(final Set<String> fields) {
+    final byte[][] qualifiers = new byte[fields.size()][];
+    int idx = 0;
+    for (final String field : fields) {
+      qualifiers[idx++] = field.getBytes();
+    }
+    return qualifiers;
+  }
+}
\ No newline at end of file
diff --git a/asynchbase/src/main/java/com/yahoo/ycsb/db/package-info.java b/asynchbase/src/main/java/com/yahoo/ycsb/db/package-info.java
new file mode 100644
index 0000000000000000000000000000000000000000..72faa5016162c0e8d69f866c5d8f8ab0a8929f86
--- /dev/null
+++ b/asynchbase/src/main/java/com/yahoo/ycsb/db/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Copyright (c) 2016 YCSB contributors. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+
+/**
+ * The YCSB binding for HBase using the AsyncHBase client.
+ */
+package com.yahoo.ycsb.db;
diff --git a/asynchbase/src/test/java/com/google/common/base/Stopwatch.java b/asynchbase/src/test/java/com/google/common/base/Stopwatch.java
new file mode 100644
index 0000000000000000000000000000000000000000..4d46924bda1ce1f78dafa50cce93b0f70634b13b
--- /dev/null
+++ b/asynchbase/src/test/java/com/google/common/base/Stopwatch.java
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2008 The Guava Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.common.base;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.concurrent.TimeUnit.MICROSECONDS;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.GwtCompatible;
+import com.google.common.annotations.GwtIncompatible;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * An object that measures elapsed time in nanoseconds. It is useful to measure
+ * elapsed time using this class instead of direct calls to {@link
+ * System#nanoTime} for a few reasons:
+ *
+ * <ul>
+ * <li>An alternate time source can be substituted, for testing or performance
+ *     reasons.
+ * <li>As documented by {@code nanoTime}, the value returned has no absolute
+ *     meaning, and can only be interpreted as relative to another timestamp
+ *     returned by {@code nanoTime} at a different time. {@code Stopwatch} is a
+ *     more effective abstraction because it exposes only these relative values,
+ *     not the absolute ones.
+ * </ul>
+ *
+ * <p>Basic usage:
+ * <pre>
+ *   Stopwatch stopwatch = Stopwatch.{@link #createStarted createStarted}();
+ *   doSomething();
+ *   stopwatch.{@link #stop stop}(); // optional
+ *
+ *   long millis = stopwatch.elapsed(MILLISECONDS);
+ *
+ *   log.info("that took: " + stopwatch); // formatted string like "12.3 ms"
+ * </pre>
+ *
+ * <p>Stopwatch methods are not idempotent; it is an error to start or stop a
+ * stopwatch that is already in the desired state.
+ *
+ * <p>When testing code that uses this class, use the {@linkplain
+ * #Stopwatch(Ticker) alternate constructor} to supply a fake or mock ticker.
+ * <!-- TODO(kevinb): restore the "such as" --> This allows you to
+ * simulate any valid behavior of the stopwatch.
+ *
+ * <p><b>Note:</b> This class is not thread-safe.
+ *
+ * @author Kevin Bourrillion
+ * @since 10.0
+ */
+@Beta
+@GwtCompatible(emulated = true)
+public final class Stopwatch {
+  private final Ticker ticker;
+  private boolean isRunning;
+  private long elapsedNanos;
+  private long startTick;
+
+  /**
+   * Creates (but does not start) a new stopwatch using {@link System#nanoTime}
+   * as its time source.
+   *
+   * @since 15.0
+   */
+  public static Stopwatch createUnstarted() {
+    return new Stopwatch();
+  }
+
+  /**
+   * Creates (but does not start) a new stopwatch, using the specified time
+   * source.
+   *
+   * @since 15.0
+   */
+  public static Stopwatch createUnstarted(Ticker ticker) {
+    return new Stopwatch(ticker);
+  }
+
+  /**
+   * Creates (and starts) a new stopwatch using {@link System#nanoTime}
+   * as its time source.
+   *
+   * @since 15.0
+   */
+  public static Stopwatch createStarted() {
+    return new Stopwatch().start();
+  }
+
+  /**
+   * Creates (and starts) a new stopwatch, using the specified time
+   * source.
+   *
+   * @since 15.0
+   */
+  public static Stopwatch createStarted(Ticker ticker) {
+    return new Stopwatch(ticker).start();
+  }
+
+  /**
+   * Creates (but does not start) a new stopwatch using {@link System#nanoTime}
+   * as its time source.
+   *
+   * @deprecated Use {@link Stopwatch#createUnstarted()} instead.
+   */
+  @Deprecated
+  public Stopwatch() {
+    this(Ticker.systemTicker());
+  }
+
+  /**
+   * Creates (but does not start) a new stopwatch, using the specified time
+   * source.
+   *
+   * @deprecated Use {@link Stopwatch#createUnstarted(Ticker)} instead.
+   */
+  @Deprecated
+  public Stopwatch(Ticker ticker) {
+    this.ticker = checkNotNull(ticker, "ticker");
+  }
+
+  /**
+   * Returns {@code true} if {@link #start()} has been called on this stopwatch,
+   * and {@link #stop()} has not been called since the last call to {@code
+   * start()}.
+   */
+  public boolean isRunning() {
+    return isRunning;
+  }
+
+  /**
+   * Starts the stopwatch.
+   *
+   * @return this {@code Stopwatch} instance
+   * @throws IllegalStateException if the stopwatch is already running.
+   */
+  public Stopwatch start() {
+    checkState(!isRunning, "This stopwatch is already running.");
+    isRunning = true;
+    startTick = ticker.read();
+    return this;
+  }
+
+  /**
+   * Stops the stopwatch. Future reads will return the fixed duration that had
+   * elapsed up to this point.
+   *
+   * @return this {@code Stopwatch} instance
+   * @throws IllegalStateException if the stopwatch is already stopped.
+   */
+  public Stopwatch stop() {
+    long tick = ticker.read();
+    checkState(isRunning, "This stopwatch is already stopped.");
+    isRunning = false;
+    elapsedNanos += tick - startTick;
+    return this;
+  }
+
+  /**
+   * Sets the elapsed time for this stopwatch to zero,
+   * and places it in a stopped state.
+   *
+   * @return this {@code Stopwatch} instance
+   */
+  public Stopwatch reset() {
+    elapsedNanos = 0;
+    isRunning = false;
+    return this;
+  }
+
+  private long elapsedNanos() {
+    return isRunning ? ticker.read() - startTick + elapsedNanos : elapsedNanos;
+  }
+
+  /**
+   * Returns the current elapsed time shown on this stopwatch, expressed
+   * in the desired time unit, with any fraction rounded down.
+   *
+   * <p>Note that the overhead of measurement can be more than a microsecond, so
+   * it is generally not useful to specify {@link TimeUnit#NANOSECONDS}
+   * precision here.
+   *
+   * @since 14.0 (since 10.0 as {@code elapsedTime()})
+   */
+  public long elapsed(TimeUnit desiredUnit) {
+    return desiredUnit.convert(elapsedNanos(), NANOSECONDS);
+  }
+
+  /**
+   * Returns the current elapsed time shown on this stopwatch, expressed
+   * in the desired time unit, with any fraction rounded down.
+   *
+   * <p>Note that the overhead of measurement can be more than a microsecond, so
+   * it is generally not useful to specify {@link TimeUnit#NANOSECONDS}
+   * precision here.
+   *
+   * @deprecated Use {@link Stopwatch#elapsed(TimeUnit)} instead. This method is
+   *     scheduled to be removed in Guava release 16.0.
+   */
+  @Deprecated
+  public long elapsedTime(TimeUnit desiredUnit) {
+    return elapsed(desiredUnit);
+  }
+
+  /**
+   * Returns the current elapsed time shown on this stopwatch, expressed
+   * in milliseconds, with any fraction rounded down. This is identical to
+   * {@code elapsed(TimeUnit.MILLISECONDS)}.
+   *
+   * @deprecated Use {@code stopwatch.elapsed(MILLISECONDS)} instead. This
+   *     method is scheduled to be removed in Guava release 16.0.
+   */
+  @Deprecated
+  public long elapsedMillis() {
+    return elapsed(MILLISECONDS);
+  }
+
+  /**
+   * Returns a string representation of the current elapsed time.
+   */
+  @GwtIncompatible("String.format()")
+  @Override public String toString() {
+    long nanos = elapsedNanos();
+
+    TimeUnit unit = chooseUnit(nanos);
+    double value = (double) nanos / NANOSECONDS.convert(1, unit);
+
+    // Too bad this functionality is not exposed as a regular method call
+    return String.format("%.4g %s", value, abbreviate(unit));
+  }
+
+  private static TimeUnit chooseUnit(long nanos) {
+    if (SECONDS.convert(nanos, NANOSECONDS) > 0) {
+      return SECONDS;
+    }
+    if (MILLISECONDS.convert(nanos, NANOSECONDS) > 0) {
+      return MILLISECONDS;
+    }
+    if (MICROSECONDS.convert(nanos, NANOSECONDS) > 0) {
+      return MICROSECONDS;
+    }
+    return NANOSECONDS;
+  }
+
+  private static String abbreviate(TimeUnit unit) {
+    switch (unit) {
+      case NANOSECONDS:
+        return "ns";
+      case MICROSECONDS:
+        return "\u03bcs"; // μs
+      case MILLISECONDS:
+        return "ms";
+      case SECONDS:
+        return "s";
+      default:
+        throw new AssertionError();
+    }
+  }
+}
\ No newline at end of file
diff --git a/asynchbase/src/test/java/com/google/common/io/Closeables.java b/asynchbase/src/test/java/com/google/common/io/Closeables.java
new file mode 100644
index 0000000000000000000000000000000000000000..4a92c9c09883709deb66f625d9cd3ade79f9a9a2
--- /dev/null
+++ b/asynchbase/src/test/java/com/google/common/io/Closeables.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2007 The Guava Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.common.io;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.VisibleForTesting;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import javax.annotation.Nullable;
+
+/**
+ * Utility methods for working with {@link Closeable} objects.
+ *
+ * @author Michael Lancaster
+ * @since 1.0
+ */
+@Beta
+public final class Closeables {
+  @VisibleForTesting static final Logger logger
+      = Logger.getLogger(Closeables.class.getName());
+
+  private Closeables() {}
+
+  /**
+   * Closes a {@link Closeable}, with control over whether an
+   * {@code IOException} may be thrown. This is primarily useful in a
+   * finally block, where a thrown exception needs to be logged but not
+   * propagated (otherwise the original exception will be lost).
+   *
+   * <p>If {@code swallowIOException} is true then we never throw
+   * {@code IOException} but merely log it.
+   *
+   * <p>Example:
+   *
+   * <p><pre>public void useStreamNicely() throws IOException {
+   * SomeStream stream = new SomeStream("foo");
+   * boolean threw = true;
+   * try {
+   *   // Some code which does something with the Stream. May throw a
+   *   // Throwable.
+   *   threw = false; // No throwable thrown.
+   * } finally {
+   *   // Close the stream.
+   *   // If an exception occurs, only rethrow it if (threw==false).
+   *   Closeables.close(stream, threw);
+   * }
+   * </pre>
+   *
+   * @param closeable the {@code Closeable} object to be closed, or null,
+   *     in which case this method does nothing
+   * @param swallowIOException if true, don't propagate IO exceptions
+   *     thrown by the {@code close} methods
+   * @throws IOException if {@code swallowIOException} is false and
+   *     {@code close} throws an {@code IOException}.
+   */
+  public static void close(@Nullable Closeable closeable,
+      boolean swallowIOException) throws IOException {
+    if (closeable == null) {
+      return;
+    }
+    try {
+      closeable.close();
+    } catch (IOException e) {
+      if (swallowIOException) {
+        logger.log(Level.WARNING,
+            "IOException thrown while closing Closeable.", e);
+      } else {
+        throw e;
+      }
+    }
+  }
+
+  /**
+   * Equivalent to calling {@code close(closeable, true)}, but with no
+   * IOException in the signature.
+   * @param closeable the {@code Closeable} object to be closed, or null, in
+   *      which case this method does nothing
+   */
+  public static void closeQuietly(@Nullable Closeable closeable) {
+    try {
+      close(closeable, true);
+    } catch (IOException e) {
+      logger.log(Level.SEVERE, "IOException should not have been thrown.", e);
+    }
+  }
+}
\ No newline at end of file
diff --git a/asynchbase/src/test/java/com/google/common/io/LimitInputStream.java b/asynchbase/src/test/java/com/google/common/io/LimitInputStream.java
new file mode 100644
index 0000000000000000000000000000000000000000..a529f5e127fc422cf287693f18da105fbe78aacc
--- /dev/null
+++ b/asynchbase/src/test/java/com/google/common/io/LimitInputStream.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2007 The Guava Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.common.io;
+
+import com.google.common.annotations.Beta;
+import com.google.common.base.Preconditions;
+
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * An InputStream that limits the number of bytes which can be read.
+ *
+ * @author Charles Fry
+ * @since 1.0
+ */
+@Beta
+public final class LimitInputStream extends FilterInputStream {
+
+  private long left;
+  private long mark = -1;
+
+  /**
+   * Wraps another input stream, limiting the number of bytes which can be read.
+   *
+   * @param in the input stream to be wrapped
+   * @param limit the maximum number of bytes to be read
+   */
+  public LimitInputStream(InputStream in, long limit) {
+    super(in);
+    Preconditions.checkNotNull(in);
+    Preconditions.checkArgument(limit >= 0, "limit must be non-negative");
+    left = limit;
+  }
+
+  @Override public int available() throws IOException {
+    return (int) Math.min(in.available(), left);
+  }
+
+  @Override public synchronized void mark(int readlimit) {
+    in.mark(readlimit);
+    mark = left;
+    // it's okay to mark even if mark isn't supported, as reset won't work
+  }
+
+  @Override public int read() throws IOException {
+    if (left == 0) {
+      return -1;
+    }
+
+    int result = in.read();
+    if (result != -1) {
+      --left;
+    }
+    return result;
+  }
+
+  @Override public int read(byte[] b, int off, int len) throws IOException {
+    if (left == 0) {
+      return -1;
+    }
+
+    len = (int) Math.min(len, left);
+    int result = in.read(b, off, len);
+    if (result != -1) {
+      left -= result;
+    }
+    return result;
+  }
+
+  @Override public synchronized void reset() throws IOException {
+    if (!in.markSupported()) {
+      throw new IOException("Mark not supported");
+    }
+    if (mark == -1) {
+      throw new IOException("Mark not set");
+    }
+
+    in.reset();
+    left = mark;
+  }
+
+  @Override public long skip(long n) throws IOException {
+    n = Math.min(n, left);
+    long skipped = in.skip(n);
+    left -= skipped;
+    return skipped;
+  }
+}
\ No newline at end of file
diff --git a/asynchbase/src/test/java/com/yahoo/ycsb/db/AsyncHBaseTest.java b/asynchbase/src/test/java/com/yahoo/ycsb/db/AsyncHBaseTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..29a09a79677c7c42dd1b0a9177d9e24f0bbd0b53
--- /dev/null
+++ b/asynchbase/src/test/java/com/yahoo/ycsb/db/AsyncHBaseTest.java
@@ -0,0 +1,211 @@
+/**
+ * Copyright (c) 2016 YCSB contributors. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+package com.yahoo.ycsb.db;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+import com.yahoo.ycsb.ByteIterator;
+import com.yahoo.ycsb.Status;
+import com.yahoo.ycsb.StringByteIterator;
+import com.yahoo.ycsb.db.AsyncHBaseClient;
+import com.yahoo.ycsb.measurements.Measurements;
+import com.yahoo.ycsb.workloads.CoreWorkload;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Properties;
+import java.util.Vector;
+
+/**
+ * Integration tests for the YCSB AsyncHBase client, using an HBase minicluster.
+ * These are the same as those for the hbase10 client.
+ */
+public class AsyncHBaseTest {
+
+  private final static String COLUMN_FAMILY = "cf";
+
+  private static HBaseTestingUtility testingUtil;
+  private AsyncHBaseClient client;
+  private Table table = null;
+
+  private static boolean isWindows() {
+    final String os = System.getProperty("os.name");
+    return os.startsWith("Windows");
+  }
+
+  /**
+   * Creates a mini-cluster for use in these tests.
+   *
+   * This is a heavy-weight operation, so invoked only once for the test class.
+   */
+  @BeforeClass
+  public static void setUpClass() throws Exception {
+    // Minicluster setup fails on Windows with an UnsatisfiedLinkError.
+    // Skip if windows.
+    assumeTrue(!isWindows());
+    testingUtil = HBaseTestingUtility.createLocalHTU();
+    testingUtil.startMiniCluster();
+  }
+
+  /**
+   * Tears down mini-cluster.
+   */
+  @AfterClass
+  public static void tearDownClass() throws Exception {
+    if (testingUtil != null) {
+      testingUtil.shutdownMiniCluster();
+    }
+  }
+
+  /**
+   * Sets up the mini-cluster for testing.
+   *
+   * We re-create the table for each test.
+   */
+  @Before
+  public void setUp() throws Exception {
+    Properties p = new Properties();
+    p.setProperty("columnfamily", COLUMN_FAMILY);
+
+    Measurements.setProperties(p);
+    final CoreWorkload workload = new CoreWorkload();
+    workload.init(p);
+
+    table = testingUtil.createTable(TableName.valueOf(CoreWorkload.table), Bytes.toBytes(COLUMN_FAMILY));
+
+    final String zkQuorum = "127.0.0.1:" + testingUtil.getZkCluster().getClientPort();
+    p.setProperty("hbase.zookeeper.quorum", zkQuorum);
+    client = new AsyncHBaseClient();
+    client.setProperties(p);
+    client.init();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    table.close();
+    testingUtil.deleteTable(CoreWorkload.table);
+  }
+
+  @Test
+  public void testRead() throws Exception {
+    final String rowKey = "row1";
+    final Put p = new Put(Bytes.toBytes(rowKey));
+    p.addColumn(Bytes.toBytes(COLUMN_FAMILY),
+        Bytes.toBytes("column1"), Bytes.toBytes("value1"));
+    p.addColumn(Bytes.toBytes(COLUMN_FAMILY),
+        Bytes.toBytes("column2"), Bytes.toBytes("value2"));
+    table.put(p);
+
+    final HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
+    final Status status = client.read(CoreWorkload.table, rowKey, null, result);
+    assertEquals(Status.OK, status);
+    assertEquals(2, result.size());
+    assertEquals("value1", result.get("column1").toString());
+    assertEquals("value2", result.get("column2").toString());
+  }
+
+  @Test
+  public void testReadMissingRow() throws Exception {
+    final HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
+    final Status status = client.read(CoreWorkload.table, "Missing row", null, result);
+    assertEquals(Status.NOT_FOUND, status);
+    assertEquals(0, result.size());
+  }
+
+  @Test
+  public void testScan() throws Exception {
+    // Fill with data
+    final String colStr = "row_number";
+    final byte[] col = Bytes.toBytes(colStr);
+    final int n = 10;
+    final List<Put> puts = new ArrayList<Put>(n);
+    for(int i = 0; i < n; i++) {
+      final byte[] key = Bytes.toBytes(String.format("%05d", i));
+      final byte[] value = java.nio.ByteBuffer.allocate(4).putInt(i).array();
+      final Put p = new Put(key);
+      p.addColumn(Bytes.toBytes(COLUMN_FAMILY), col, value);
+      puts.add(p);
+    }
+    table.put(puts);
+
+    // Test
+    final Vector<HashMap<String, ByteIterator>> result =
+        new Vector<HashMap<String, ByteIterator>>();
+
+    // Scan 5 records, skipping the first
+    client.scan(CoreWorkload.table, "00001", 5, null, result);
+
+    assertEquals(5, result.size());
+    for(int i = 0; i < 5; i++) {
+      final HashMap<String, ByteIterator> row = result.get(i);
+      assertEquals(1, row.size());
+      assertTrue(row.containsKey(colStr));
+      final byte[] bytes = row.get(colStr).toArray();
+      final ByteBuffer buf = ByteBuffer.wrap(bytes);
+      final int rowNum = buf.getInt();
+      assertEquals(i + 1, rowNum);
+    }
+  }
+
+  @Test
+  public void testUpdate() throws Exception {
+    final String key = "key";
+    final HashMap<String, String> input = new HashMap<String, String>();
+    input.put("column1", "value1");
+    input.put("column2", "value2");
+    final Status status = client.insert(CoreWorkload.table, key, StringByteIterator.getByteIteratorMap(input));
+    assertEquals(Status.OK, status);
+
+    // Verify result
+    final Get get = new Get(Bytes.toBytes(key));
+    final Result result = this.table.get(get);
+    assertFalse(result.isEmpty());
+    assertEquals(2, result.size());
+    for(final java.util.Map.Entry<String, String> entry : input.entrySet()) {
+      assertEquals(entry.getValue(),
+          new String(result.getValue(Bytes.toBytes(COLUMN_FAMILY),
+            Bytes.toBytes(entry.getKey()))));
+    }
+  }
+
+  @Test
+  @Ignore("Not yet implemented")
+  public void testDelete() {
+    fail("Not yet implemented");
+  }
+}
+
diff --git a/asynchbase/src/test/resources/hbase-site.xml b/asynchbase/src/test/resources/hbase-site.xml
new file mode 100644
index 0000000000000000000000000000000000000000..a8b29e451f440ad7c09b3b2f25eebaf56f07e6bb
--- /dev/null
+++ b/asynchbase/src/test/resources/hbase-site.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2016 YCSB contributors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file except in compliance with the License. You
+may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License. See accompanying
+LICENSE file.
+-->
+
+<configuration>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase master web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase regionserver web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+</configuration>
diff --git a/asynchbase/src/test/resources/log4j.properties b/asynchbase/src/test/resources/log4j.properties
new file mode 100644
index 0000000000000000000000000000000000000000..a9df32e044b9374097b9c110a79f35ff34b5a793
--- /dev/null
+++ b/asynchbase/src/test/resources/log4j.properties
@@ -0,0 +1,28 @@
+#
+# Copyright (c) 2015 YCSB contributors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you
+# may not use this file except in compliance with the License. You
+# may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License. See accompanying
+# LICENSE file.
+#
+
+# Root logger option
+log4j.rootLogger=WARN, stderr
+
+log4j.appender.stderr=org.apache.log4j.ConsoleAppender
+log4j.appender.stderr.target=System.err
+log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
+log4j.appender.stderr.layout.conversionPattern=%d{yyyy/MM/dd HH:mm:ss} %-5p %c %x - %m%n
+
+# Suppress messages from ZKTableStateManager: Creates a large number of table
+# state change messages.
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKTableStateManager=ERROR
diff --git a/bin/ycsb b/bin/ycsb
index 8bf00bba3e730fb8ea1a4b1990bb80089864ae30..4db2926e8d043d23947018aa96f5b8e161a145aa 100755
--- a/bin/ycsb
+++ b/bin/ycsb
@@ -23,11 +23,13 @@ import os
 import shlex
 import sys
 import subprocess
+
 try:
     import argparse
 except ImportError:
     print >> sys.stderr, '[ERROR] argparse not found. Try installing it via "pip".'
-    raise
+    exit(1)
 
 BASE_URL = "https://github.com/brianfrankcooper/YCSB/tree/master/"
 COMMANDS = {
@@ -51,6 +53,7 @@ COMMANDS = {
 DATABASES = {
     "accumulo"     : "com.yahoo.ycsb.db.accumulo.AccumuloClient",
     "aerospike"    : "com.yahoo.ycsb.db.AerospikeClient",
+    "asynchbase"   : "com.yahoo.ycsb.db.AsyncHBaseClient",
     "basic"        : "com.yahoo.ycsb.BasicDB",
     "cassandra-7"  : "com.yahoo.ycsb.db.CassandraClient7",
     "cassandra-8"  : "com.yahoo.ycsb.db.CassandraClient8",
@@ -58,9 +61,11 @@ DATABASES = {
     "cassandra-cql": "com.yahoo.ycsb.db.CassandraCQLClient",
     "cassandra2-cql": "com.yahoo.ycsb.db.CassandraCQLClient",
     "couchbase"    : "com.yahoo.ycsb.db.CouchbaseClient",
+    "couchbase2"   : "com.yahoo.ycsb.db.couchbase2.Couchbase2Client",
     "dynamodb"     : "com.yahoo.ycsb.db.DynamoDBClient",
     "elasticsearch": "com.yahoo.ycsb.db.ElasticsearchClient",
     "geode"        : "com.yahoo.ycsb.db.GeodeClient",
+    "googlebigtable"  : "com.yahoo.ycsb.db.GoogleBigtableClient",
     "googledatastore" : "com.yahoo.ycsb.db.GoogleDatastoreClient",
     "hbase094"     : "com.yahoo.ycsb.db.HBaseClient",
     "hbase098"     : "com.yahoo.ycsb.db.HBaseClient",
@@ -78,6 +83,7 @@ DATABASES = {
     "orientdb"     : "com.yahoo.ycsb.db.OrientDBClient",
     "rados"        : "com.yahoo.ycsb.db.RadosClient",
     "redis"        : "com.yahoo.ycsb.db.RedisClient",
+    "riak"         : "com.yahoo.ycsb.db.riak.RiakKVClient",
     "s3"           : "com.yahoo.ycsb.db.S3Client",
     "solr"         : "com.yahoo.ycsb.db.SolrClient",
     "tarantool"    : "com.yahoo.ycsb.db.TarantoolClient",
@@ -119,12 +125,41 @@ def usage():
 
     return output.getvalue()
 
-def check_output(cmd):
-    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
-    stdout, _ = p.communicate()
-    if p.returncode:
-        raise subprocess.CalledProcessError(p.returncode, cmd)
-    return stdout
+# Python 2.6 doesn't have check_output. Add the method as it is in Python 2.7
+# Based on https://github.com/python/cpython/blob/2.7/Lib/subprocess.py#L545
+def check_output(*popenargs, **kwargs):
+    r"""Run command with arguments and return its output as a byte string.
+
+    If the exit code was non-zero it raises a CalledProcessError.  The
+    CalledProcessError object will have the return code in the returncode
+    attribute and output in the output attribute.
+
+    The arguments are the same as for the Popen constructor.  Example:
+
+    >>> check_output(["ls", "-l", "/dev/null"])
+    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'
+
+    The stdout argument is not allowed as it is used internally.
+    To capture standard error in the result, use stderr=STDOUT.
+
+    >>> check_output(["/bin/sh", "-c",
+    ...               "ls -l non_existent_file ; exit 0"],
+    ...              stderr=STDOUT)
+    'ls: non_existent_file: No such file or directory\n'
+    """
+    if 'stdout' in kwargs:
+        raise ValueError('stdout argument not allowed, it will be overridden.')
+    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
+    output, unused_err = process.communicate()
+    retcode = process.poll()
+    if retcode:
+        cmd = kwargs.get("args")
+        if cmd is None:
+            cmd = popenargs[0]
+        error = subprocess.CalledProcessError(retcode, cmd)
+        error.output = output
+        raise error
+    return output
 
 def debug(message):
     print >> sys.stderr, "[DEBUG] ", message
diff --git a/binding-parent/datastore-specific-descriptor/pom.xml b/binding-parent/datastore-specific-descriptor/pom.xml
index 840242e7f5f71b824213e14e3974a0edd86bc31e..e428b52d8027d82782a794edd01162b8368f15b3 100644
--- a/binding-parent/datastore-specific-descriptor/pom.xml
+++ b/binding-parent/datastore-specific-descriptor/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>root</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../../</relativePath>
   </parent>
 
diff --git a/binding-parent/pom.xml b/binding-parent/pom.xml
index 0416208b2b9846e22d3a43117c8c64c2e44c5b56..d4d457a633e1bdf814fa0893ae7999e153d9695d 100644
--- a/binding-parent/pom.xml
+++ b/binding-parent/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>root</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
   </parent>
 
   <artifactId>binding-parent</artifactId>
diff --git a/cassandra/pom.xml b/cassandra/pom.xml
index 274ed5264506b69366abbb75180fc7c3b40265f3..890504eaac41acf9b8274b558e927b46557d5fa4 100644
--- a/cassandra/pom.xml
+++ b/cassandra/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
      <groupId>com.yahoo.ycsb</groupId>
      <artifactId>binding-parent</artifactId>
-     <version>0.9.0-SNAPSHOT</version>
+     <version>0.10.0-SNAPSHOT</version>
      <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/cassandra2/pom.xml b/cassandra2/pom.xml
index 1a4b0ab6eb7fcddb87a3c29880c61b8665d77b04..460cf21588c2f7821ddf0496054d2d07087677d1 100644
--- a/cassandra2/pom.xml
+++ b/cassandra2/pom.xml
@@ -23,7 +23,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
diff --git a/core/pom.xml b/core/pom.xml
index 3300cf485f3d63e505e349350605830d6c12c8b1..fa84bb7e3efcccc5db28fba8c9b77fa2fe9fd5cf 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>root</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
   </parent>
   
   <artifactId>core</artifactId>
@@ -34,6 +34,11 @@ LICENSE file.
   </properties>
 
   <dependencies>	
+    <dependency>
+      <groupId>org.apache.htrace</groupId>
+      <artifactId>htrace-core4</artifactId>
+      <version>4.1.0-incubating</version>
+    </dependency>
     <dependency>
       <groupId>org.codehaus.jackson</groupId>
       <artifactId>jackson-mapper-asl</artifactId>
diff --git a/core/src/main/java/com/yahoo/ycsb/Client.java b/core/src/main/java/com/yahoo/ycsb/Client.java
index d64772564d81e90e5daee589ef8fc9ba28ca8d7b..de87be7c9b978a522b9ad9a6d7a8f86098cd3e4f 100644
--- a/core/src/main/java/com/yahoo/ycsb/Client.java
+++ b/core/src/main/java/com/yahoo/ycsb/Client.java
@@ -27,12 +27,19 @@ import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.Enumeration;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.LockSupport;
 
+import org.apache.htrace.core.Tracer;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.HTraceConfiguration;
+
 import com.yahoo.ycsb.measurements.Measurements;
 import com.yahoo.ycsb.measurements.exporter.MeasurementsExporter;
 import com.yahoo.ycsb.measurements.exporter.TextMeasurementsExporter;
@@ -47,6 +54,12 @@ class StatusThread extends Thread
   /** Counts down each of the clients completing. */
   private final CountDownLatch _completeLatch;
 
+  /** Stores the measurements for the run. */
+  private final Measurements _measurements;
+  
+  /** Whether or not to track the JVM stats per run. */
+  private final boolean _trackJVMStats;
+  
   /** The clients that are running. */
   private final List<ClientThread> _clients;
 
@@ -56,8 +69,18 @@ class StatusThread extends Thread
   /** The interval for reporting status. */
   private long _sleeptimeNs;
 
+  /** JVM max/mins */
+  private int _maxThreads;
+  private int _minThreads = Integer.MAX_VALUE;
+  private long _maxUsedMem;
+  private long _minUsedMem = Long.MAX_VALUE;
+  private double _maxLoadAvg;
+  private double _minLoadAvg = Double.MAX_VALUE;
+  private long lastGCCount = 0;
+  private long lastGCTime = 0;
+
   /**
-   * Creates a new StatusThread.
+   * Creates a new StatusThread without JVM stat tracking.
    *
    * @param completeLatch The latch that each client thread will {@link CountDownLatch#countDown()} as they complete.
    * @param clients The clients to collect metrics from.
@@ -67,14 +90,33 @@ class StatusThread extends Thread
    */
   public StatusThread(CountDownLatch completeLatch, List<ClientThread> clients,
                       String label, boolean standardstatus, int statusIntervalSeconds)
+  {
+    this(completeLatch, clients, label, standardstatus, statusIntervalSeconds, false);
+  }
+  
+  /**
+   * Creates a new StatusThread.
+   *
+   * @param completeLatch The latch that each client thread will {@link CountDownLatch#countDown()} as they complete.
+   * @param clients The clients to collect metrics from.
+   * @param label The label for the status.
+   * @param standardstatus If true the status is printed to stdout in addition to stderr.
+   * @param statusIntervalSeconds The number of seconds between status updates.
+   * @param trackJVMStats Whether or not to track JVM stats.
+   */
+  public StatusThread(CountDownLatch completeLatch, List<ClientThread> clients,
+                      String label, boolean standardstatus, int statusIntervalSeconds,
+                      boolean trackJVMStats)
   {
     _completeLatch=completeLatch;
     _clients=clients;
     _label=label;
     _standardstatus=standardstatus;
     _sleeptimeNs=TimeUnit.SECONDS.toNanos(statusIntervalSeconds);
+    _measurements = Measurements.getMeasurements();
+    _trackJVMStats = trackJVMStats;
   }
-
+  
   /**
    * Run and periodically report status.
    */
@@ -94,6 +136,10 @@ class StatusThread extends Thread
       long nowMs=System.currentTimeMillis();
 
       lastTotalOps = computeStats(startTimeMs, startIntervalMs, nowMs, lastTotalOps);
+      
+      if (_trackJVMStats) {
+        measureJVM();
+      }
 
       alldone = waitForClientsUntil(deadline);
 
@@ -102,6 +148,9 @@ class StatusThread extends Thread
     }
     while (!alldone);
 
+    if (_trackJVMStats) {
+      measureJVM();
+    }
     // Print the final stats.
     computeStats(startTimeMs, startIntervalMs, System.currentTimeMillis(), lastTotalOps);
   }
@@ -187,6 +236,86 @@ class StatusThread extends Thread
 
     return alldone;
   }
+
+  /** Executes the JVM measurements. */
+  private void measureJVM() {
+    final int threads = Utils.getActiveThreadCount();
+    if (threads < _minThreads) {
+      _minThreads = threads;
+    }
+    if (threads > _maxThreads) {
+      _maxThreads = threads;
+    }
+    _measurements.measure("THREAD_COUNT", threads);
+    
+    // TODO - once measurements allow for other number types, switch to using
+    // the raw bytes. Otherwise we can track in MB to avoid negative values 
+    // when faced with huge heaps.
+    final int usedMem = Utils.getUsedMemoryMegaBytes();
+    if (usedMem < _minUsedMem) {
+      _minUsedMem = usedMem;
+    }
+    if (usedMem > _maxUsedMem) {
+      _maxUsedMem = usedMem;
+    }
+    _measurements.measure("USED_MEM_MB", usedMem);
+    
+    // Some JVMs may not implement this feature, so if the value is less than
+    // zero, just omit it.
+    final double systemLoad = Utils.getSystemLoadAverage();
+    if (systemLoad >= 0) {
+      // TODO - store the double if measurements allows for them
+      _measurements.measure("SYS_LOAD_AVG", (int)systemLoad);
+      if (systemLoad > _maxLoadAvg) {
+        _maxLoadAvg = systemLoad;
+      }
+      if (systemLoad < _minLoadAvg) {
+        _minLoadAvg = systemLoad;
+      }
+    }
+     
+    final long gcs = Utils.getGCTotalCollectionCount();
+    _measurements.measure("GCS", (int)(gcs - lastGCCount));
+    final long gcTime = Utils.getGCTotalTime();
+    _measurements.measure("GCS_TIME", (int)(gcTime - lastGCTime));
+    lastGCCount = gcs;
+    lastGCTime = gcTime;
+  }
+  
+  /** @return The maximum number of threads running during the test. */
+  public int getMaxThreads() {
+    return _maxThreads;
+  }
+  
+  /** @return The minimum number of threads running during the test. */
+  public int getMinThreads() {
+    return _minThreads;
+  }
+  
+  /** @return The maximum memory used during the test. */
+  public long getMaxUsedMem() {
+    return _maxUsedMem;
+  }
+  
+  /** @return The minimum memory used during the test. */
+  public long getMinUsedMem() {
+    return _minUsedMem;
+  }
+  
+  /** @return The maximum load average during the test. */
+  public double getMaxLoadAvg() {
+    return _maxLoadAvg;
+  }
+  
+  /** @return The minimum load average during the test. */
+  public double getMinLoadAvg() {
+    return _minLoadAvg;
+  }
+
+  /** @return Whether or not the thread is tracking JVM stats. */
+  public boolean trackJVMStats() {
+    return _trackJVMStats;
+  }
 }
 
 /**
@@ -228,7 +357,7 @@ class RemainingFormatter {
  * @author cooperb
  *
  */
-class ClientThread extends Thread
+class ClientThread implements Runnable
 {
   /** Counts down each of the clients completing. */
   private final CountDownLatch _completeLatch;
@@ -474,6 +603,20 @@ public class Client
    */
   public static final String DO_TRANSACTIONS_PROPERTY = "dotransactions";
 
+  /** An optional thread used to track progress and measure JVM stats. */
+  private static StatusThread statusthread = null;
+
+  // HTrace integration related constants.
+
+  /**
+   * All keys for configuring the tracing system start with this prefix.
+   */
+  private static final String HTRACE_KEY_PREFIX="htrace.";
+  private static final String CLIENT_WORKLOAD_INIT_SPAN = "Client#workload_init";
+  private static final String CLIENT_INIT_SPAN = "Client#init";
+  private static final String CLIENT_WORKLOAD_SPAN = "Client#workload";
+  private static final String CLIENT_CLEANUP_SPAN = "Client#cleanup";
+  private static final String CLIENT_EXPORT_MEASUREMENTS_SPAN = "Client#export_measurements";
 
   public static void usageMessage()
   {
@@ -553,6 +696,29 @@ public class Client
       exporter.write("OVERALL", "RunTime(ms)", runtime);
       double throughput = 1000.0 * (opcount) / (runtime);
       exporter.write("OVERALL", "Throughput(ops/sec)", throughput);
+      
+      final Map<String, Long[]> gcs = Utils.getGCStats();
+      long totalGCCount = 0;
+      long totalGCTime = 0;
+      for (final Entry<String, Long[]> entry : gcs.entrySet()) {
+        exporter.write("TOTAL_GCS_" + entry.getKey(), "Count", entry.getValue()[0]);
+        exporter.write("TOTAL_GC_TIME_" + entry.getKey(), "Time(ms)", entry.getValue()[1]);
+        exporter.write("TOTAL_GC_TIME_%_" + entry.getKey(), "Time(%)",((double)entry.getValue()[1] / runtime) * (double)100);
+        totalGCCount += entry.getValue()[0];
+        totalGCTime += entry.getValue()[1];
+      }
+      exporter.write("TOTAL_GCs", "Count", totalGCCount);
+      
+      exporter.write("TOTAL_GC_TIME", "Time(ms)", totalGCTime);
+      exporter.write("TOTAL_GC_TIME_%", "Time(%)", ((double)totalGCTime / runtime) * (double)100);
+      if (statusthread != null && statusthread.trackJVMStats()) {
+        exporter.write("MAX_MEM_USED", "MBs", statusthread.getMaxUsedMem());
+        exporter.write("MIN_MEM_USED", "MBs", statusthread.getMinUsedMem());
+        exporter.write("MAX_THREADS", "Count", statusthread.getMaxThreads());
+        exporter.write("MIN_THREADS", "Count", statusthread.getMinThreads());
+        exporter.write("MAX_SYS_LOAD_AVG", "Load", statusthread.getMaxLoadAvg());
+        exporter.write("MIN_SYS_LOAD_AVG", "Load", statusthread.getMinLoadAvg());
+      }
 
       Measurements.getMeasurements().exportMeasurements(exporter);
     } finally
@@ -771,7 +937,15 @@ public class Client
       double targetperthread=((double)target)/((double)threadcount);
       targetperthreadperms=targetperthread/1000.0;
     }
-    
+
+    final Map<String, String> filteredProperties = new HashMap<>();
+    for (String key : props.stringPropertyNames()) {
+      if (key.startsWith(HTRACE_KEY_PREFIX)) {
+        filteredProperties.put(key.substring(HTRACE_KEY_PREFIX.length()), props.getProperty(key));
+      }
+    }
+    final HTraceConfiguration conf =  HTraceConfiguration.fromMap(filteredProperties);
+
     //show a warning message that creating the workload is taking a while
     //but only do so if it is taking longer than 2 seconds
     //(showing the message right away if the setup wasn't taking very long was confusing people)
@@ -792,6 +966,7 @@ public class Client
         }
       };
 
+
     warningthread.start();
 
     //set up measurements
@@ -816,13 +991,13 @@ public class Client
     System.err.println();
     System.err.println("Loading workload...");
 
-    Workload workload=null;
+    Workload workload = null;
 
     try
     {
       Class workloadclass = classLoader.loadClass(props.getProperty(WORKLOAD_PROPERTY));
 
-      workload=(Workload)workloadclass.newInstance();
+      workload = (Workload)workloadclass.newInstance();
     }
     catch (Exception e)
     {
@@ -831,9 +1006,16 @@ public class Client
       System.exit(0);
     }
 
+    final Tracer tracer = new Tracer.Builder("YCSB " + workload.getClass().getSimpleName())
+        .conf(conf)
+        .build();
+
     try
     {
-      workload.init(props);
+      try (final TraceScope span = tracer.newScope(CLIENT_WORKLOAD_INIT_SPAN)) {
+        workload.init(props);
+        warningthread.interrupt();
+      }
     }
     catch (WorkloadException e)
     {
@@ -842,59 +1024,66 @@ public class Client
       System.exit(0);
     }
 
-    warningthread.interrupt();
-
     //run the workload
 
     System.err.println("Starting test.");
+    final CountDownLatch completeLatch = new CountDownLatch(threadcount);
+    final List<ClientThread> clients = new ArrayList<ClientThread>(threadcount);
 
-    int opcount;
-    if (dotransactions)
-    {
-      opcount=Integer.parseInt(props.getProperty(OPERATION_COUNT_PROPERTY,"0"));
-    }
-    else
-    {
-      if (props.containsKey(INSERT_COUNT_PROPERTY))
+    boolean initFailed = false;
+    try (final TraceScope span = tracer.newScope(CLIENT_INIT_SPAN)) {
+
+      int opcount;
+      if (dotransactions)
       {
-        opcount=Integer.parseInt(props.getProperty(INSERT_COUNT_PROPERTY,"0"));
+        opcount=Integer.parseInt(props.getProperty(OPERATION_COUNT_PROPERTY,"0"));
       }
       else
       {
-        opcount=Integer.parseInt(props.getProperty(RECORD_COUNT_PROPERTY, DEFAULT_RECORD_COUNT));
+        if (props.containsKey(INSERT_COUNT_PROPERTY))
+        {
+          opcount=Integer.parseInt(props.getProperty(INSERT_COUNT_PROPERTY,"0"));
+        }
+        else
+        {
+          opcount=Integer.parseInt(props.getProperty(RECORD_COUNT_PROPERTY, DEFAULT_RECORD_COUNT));
+        }
       }
-    }
 
-    CountDownLatch completeLatch=new CountDownLatch(threadcount);
-    final List<ClientThread> clients=new ArrayList<ClientThread>(threadcount);
-    for (int threadid=0; threadid<threadcount; threadid++)
-    {
-      DB db=null;
-      try
-      {
-        db=DBFactory.newDB(dbname,props);
-      }
-      catch (UnknownDBException e)
+      for (int threadid=0; threadid<threadcount; threadid++)
       {
-        System.out.println("Unknown DB "+dbname);
-        System.exit(0);
-      }
+        DB db = null;
+        try
+        {
+          db = DBFactory.newDB(dbname, props, tracer);
+        }
+        catch (UnknownDBException e)
+        {
+          System.out.println("Unknown DB " + dbname);
+          initFailed = true;
+          break;
+        }
 
 
-      int threadopcount = opcount/threadcount;
+        int threadopcount = opcount / threadcount;
 
-      // ensure correct number of operations, in case opcount is not a multiple of threadcount
-      if (threadid<opcount%threadcount)
-      {
-        ++threadopcount;
-      }
+        // ensure correct number of operations, in case opcount is not a multiple of threadcount
+        if (threadid<opcount%threadcount)
+        {
+          ++threadopcount;
+        }
 
-      ClientThread t=new ClientThread(db,dotransactions,workload,props,threadopcount, targetperthreadperms, completeLatch);
+        ClientThread t=new ClientThread(db,dotransactions,workload,props,threadopcount, targetperthreadperms, completeLatch);
+
+        clients.add(t);
+      }
 
-      clients.add(t);
     }
 
-    StatusThread statusthread=null;
+    if (initFailed) {
+      System.err.println("Error initializing datastore bindings.");
+      System.exit(0);
+    }
 
     if (status)
     {
@@ -904,58 +1093,75 @@ public class Client
         standardstatus=true;
       }
       int statusIntervalSeconds = Integer.parseInt(props.getProperty("status.interval","10"));
-      statusthread=new StatusThread(completeLatch,clients,label,standardstatus,statusIntervalSeconds);
+      boolean trackJVMStats = props.getProperty(Measurements.MEASUREMENT_TRACK_JVM_PROPERTY, 
+          Measurements.MEASUREMENT_TRACK_JVM_PROPERTY_DEFAULT).equals("true");
+      statusthread=new StatusThread(completeLatch,clients,label,standardstatus,statusIntervalSeconds,trackJVMStats);
       statusthread.start();
     }
 
-    long st=System.currentTimeMillis();
-
-    for (Thread t : clients)
-    {
-      t.start();
-    }
-
     Thread terminator = null;
+    long st;
+    long en;
+    int opsDone;
 
-    if (maxExecutionTime > 0) {
-      terminator = new TerminatorThread(maxExecutionTime, clients, workload);
-      terminator.start();
-    }
-
-    int opsDone = 0;
+    try (final TraceScope span = tracer.newScope(CLIENT_WORKLOAD_SPAN)) {
 
-    for (Thread t : clients)
-    {
-      try
-      {
-        t.join();
-        opsDone += ((ClientThread)t).getOpsDone();
+      final Map<Thread, ClientThread> threads = new HashMap<Thread, ClientThread>(threadcount);
+      for (ClientThread client : clients) {
+        threads.put(new Thread(tracer.wrap(client, "ClientThread")), client);
       }
-      catch (InterruptedException e)
+
+      st=System.currentTimeMillis();
+
+      for (Thread t : threads.keySet())
       {
+        t.start();
       }
-    }
 
-    long en=System.currentTimeMillis();
+      if (maxExecutionTime > 0) {
+        terminator = new TerminatorThread(maxExecutionTime, threads.keySet(), workload);
+        terminator.start();
+      }
 
-    if (terminator != null && !terminator.isInterrupted()) {
-      terminator.interrupt();
-    }
+      opsDone = 0;
 
-    if (status)
-    {
-      // wake up status thread if it's asleep
-      statusthread.interrupt();
-      // at this point we assume all the monitored threads are already gone as per above join loop.
-      try {
-        statusthread.join();
-      } catch (InterruptedException e) {
+      for (Map.Entry<Thread, ClientThread> entry : threads.entrySet())
+      {
+        try
+        {
+          entry.getKey().join();
+          opsDone += entry.getValue().getOpsDone();
+        }
+        catch (InterruptedException e)
+        {
+        }
       }
+
+      en=System.currentTimeMillis();
+
     }
 
     try
     {
-      workload.cleanup();
+      try (final TraceScope span = tracer.newScope(CLIENT_CLEANUP_SPAN)) {
+
+        if (terminator != null && !terminator.isInterrupted()) {
+          terminator.interrupt();
+        }
+
+        if (status)
+        {
+          // wake up status thread if it's asleep
+          statusthread.interrupt();
+          // at this point we assume all the monitored threads are already gone as per above join loop.
+          try {
+            statusthread.join();
+          } catch (InterruptedException e) {
+          }
+        }
+
+        workload.cleanup();
+      }
     }
     catch (WorkloadException e)
     {
@@ -966,7 +1172,9 @@ public class Client
 
     try
     {
-      exportMeasurements(props, opsDone, en - st);
+      try (final TraceScope span = tracer.newScope(CLIENT_EXPORT_MEASUREMENTS_SPAN)) {
+        exportMeasurements(props, opsDone, en - st);
+      }
     } catch (IOException e)
     {
       System.err.println("Could not export measurements, error: " + e.getMessage());
diff --git a/core/src/main/java/com/yahoo/ycsb/DBFactory.java b/core/src/main/java/com/yahoo/ycsb/DBFactory.java
index 18f7f5e1f1efbd1b5bd0c59f61f59452e45713c6..2096c0d139ad74729424feaff917ddd5a9226b53 100644
--- a/core/src/main/java/com/yahoo/ycsb/DBFactory.java
+++ b/core/src/main/java/com/yahoo/ycsb/DBFactory.java
@@ -18,6 +18,7 @@
 package com.yahoo.ycsb;
 
 import java.util.Properties;
+import org.apache.htrace.core.Tracer;
 
 /**
  * Creates a DB layer by dynamically classloading the specified DB class.
@@ -25,7 +26,7 @@ import java.util.Properties;
 public class DBFactory
 {
       @SuppressWarnings("unchecked")
-	public static DB newDB(String dbname, Properties properties) throws UnknownDBException
+	public static DB newDB(String dbname, Properties properties, final Tracer tracer) throws UnknownDBException
       {
 	 ClassLoader classLoader = DBFactory.class.getClassLoader();
 
@@ -46,7 +47,7 @@ public class DBFactory
 	 
 	 ret.setProperties(properties);
 
-	 return new DBWrapper(ret);
+	 return new DBWrapper(ret, tracer);
       }
       
 }
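
With the new `tracer` parameter, every `DB` the factory produces now comes back wrapped in a `DBWrapper` that shares the client's tracer. A hedged usage sketch; `com.yahoo.ycsb.BasicDB` is core's echo/debug client, and the class name below is made up:

```java
import java.util.Properties;

import org.apache.htrace.core.Tracer;

import com.yahoo.ycsb.DB;
import com.yahoo.ycsb.DBFactory;

/** Illustrative sketch: obtaining a traced DB through the updated factory. */
public class TracedDbSketch {
  public static void main(String[] args) throws Exception {
    // An unconfigured tracer is effectively a no-op; real runs pass htrace.* properties.
    final Tracer tracer = new Tracer.Builder("YCSB example").build();
    final Properties props = new Properties();

    final DB db = DBFactory.newDB("com.yahoo.ycsb.BasicDB", props, tracer);
    db.init();     // traced by the wrapper as "BasicDB#init"
    db.cleanup();  // traced as "BasicDB#cleanup"
  }
}
```
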
diff --git a/core/src/main/java/com/yahoo/ycsb/DBWrapper.java b/core/src/main/java/com/yahoo/ycsb/DBWrapper.java
index 337f4d9b5d04d0ee5ceb337adb65169d6e7b2862..0109c519558056ab0d762b64b2350dda6fa16b7c 100644
--- a/core/src/main/java/com/yahoo/ycsb/DBWrapper.java
+++ b/core/src/main/java/com/yahoo/ycsb/DBWrapper.java
@@ -24,6 +24,9 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.Vector;
 
+import org.apache.htrace.core.Tracer;
+import org.apache.htrace.core.TraceScope;
+
 import com.yahoo.ycsb.measurements.Measurements;
 
 /**
@@ -32,8 +35,9 @@ import com.yahoo.ycsb.measurements.Measurements;
  */
 public class DBWrapper extends DB
 {
-  private DB _db;
-  private Measurements _measurements;
+  private final DB _db;
+  private final Measurements _measurements;
+  private final Tracer _tracer;
 
   private boolean reportLatencyForEachError = false;
   private HashSet<String> latencyTrackedErrors = new HashSet<String>();
@@ -46,10 +50,27 @@ public class DBWrapper extends DB
   private static final String LATENCY_TRACKED_ERRORS_PROPERTY =
       "latencytrackederrors";
 
-  public DBWrapper(DB db)
+  private final String SCOPE_STRING_CLEANUP;
+  private final String SCOPE_STRING_DELETE;
+  private final String SCOPE_STRING_INIT;
+  private final String SCOPE_STRING_INSERT;
+  private final String SCOPE_STRING_READ;
+  private final String SCOPE_STRING_SCAN;
+  private final String SCOPE_STRING_UPDATE;
+
+  public DBWrapper(final DB db, final Tracer tracer)
   {
     _db=db;
     _measurements=Measurements.getMeasurements();
+    _tracer = tracer;
+    final String simple = db.getClass().getSimpleName();
+    SCOPE_STRING_CLEANUP = simple + "#cleanup";
+    SCOPE_STRING_DELETE = simple + "#delete";
+    SCOPE_STRING_INIT = simple + "#init";
+    SCOPE_STRING_INSERT = simple + "#insert";
+    SCOPE_STRING_READ = simple + "#read";
+    SCOPE_STRING_SCAN = simple + "#scan";
+    SCOPE_STRING_UPDATE = simple + "#update";
   }
 
   /**
@@ -74,24 +95,26 @@ public class DBWrapper extends DB
    */
   public void init() throws DBException
   {
-    _db.init();
-
-    this.reportLatencyForEachError = Boolean.parseBoolean(getProperties().
-        getProperty(REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY,
-            REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY_DEFAULT));
-
-    if (!reportLatencyForEachError) {
-      String latencyTrackedErrors = getProperties().getProperty(
-          LATENCY_TRACKED_ERRORS_PROPERTY, null);
-      if (latencyTrackedErrors != null) {
-        this.latencyTrackedErrors = new HashSet<String>(Arrays.asList(
-            latencyTrackedErrors.split(",")));
+    try (final TraceScope span = _tracer.newScope(SCOPE_STRING_INIT)) {
+      _db.init();
+
+      this.reportLatencyForEachError = Boolean.parseBoolean(getProperties().
+          getProperty(REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY,
+              REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY_DEFAULT));
+
+      if (!reportLatencyForEachError) {
+        String latencyTrackedErrors = getProperties().getProperty(
+            LATENCY_TRACKED_ERRORS_PROPERTY, null);
+        if (latencyTrackedErrors != null) {
+          this.latencyTrackedErrors = new HashSet<String>(Arrays.asList(
+              latencyTrackedErrors.split(",")));
+        }
       }
-    }
 
-    System.err.println("DBWrapper: report latency for each error is " +
-        this.reportLatencyForEachError + " and specific error codes to track" +
-        " for latency are: " + this.latencyTrackedErrors.toString());
+      System.err.println("DBWrapper: report latency for each error is " +
+          this.reportLatencyForEachError + " and specific error codes to track" +
+          " for latency are: " + this.latencyTrackedErrors.toString());
+    }
   }
 
   /**
@@ -100,11 +123,13 @@ public class DBWrapper extends DB
    */
   public void cleanup() throws DBException
   {
-    long ist=_measurements.getIntendedtartTimeNs();
-    long st = System.nanoTime();
-    _db.cleanup();
-    long en=System.nanoTime();
-    measure("CLEANUP", Status.OK, ist, st, en);
+    try (final TraceScope span = _tracer.newScope(SCOPE_STRING_CLEANUP)) {
+      long ist=_measurements.getIntendedtartTimeNs();
+      long st = System.nanoTime();
+      _db.cleanup();
+      long en=System.nanoTime();
+      measure("CLEANUP", Status.OK, ist, st, en);
+    }
   }
 
   /**
@@ -120,13 +145,15 @@ public class DBWrapper extends DB
   public Status read(String table, String key, Set<String> fields,
       HashMap<String,ByteIterator> result)
   {
-    long ist=_measurements.getIntendedtartTimeNs();
-    long st = System.nanoTime();
-    Status res=_db.read(table,key,fields,result);
-    long en=System.nanoTime();
-    measure("READ", res, ist, st, en);
-    _measurements.reportStatus("READ", res);
-    return res;
+    try (final TraceScope span = _tracer.newScope(SCOPE_STRING_READ)) {
+      long ist=_measurements.getIntendedtartTimeNs();
+      long st = System.nanoTime();
+      Status res=_db.read(table,key,fields,result);
+      long en=System.nanoTime();
+      measure("READ", res, ist, st, en);
+      _measurements.reportStatus("READ", res);
+      return res;
+    }
   }
 
   /**
@@ -143,13 +170,15 @@ public class DBWrapper extends DB
   public Status scan(String table, String startkey, int recordcount,
       Set<String> fields, Vector<HashMap<String,ByteIterator>> result)
   {
-    long ist=_measurements.getIntendedtartTimeNs();
-    long st = System.nanoTime();
-    Status res=_db.scan(table,startkey,recordcount,fields,result);
-    long en=System.nanoTime();
-    measure("SCAN", res, ist, st, en);
-    _measurements.reportStatus("SCAN", res);
-    return res;
+    try (final TraceScope span = _tracer.newScope(SCOPE_STRING_SCAN)) {
+      long ist=_measurements.getIntendedtartTimeNs();
+      long st = System.nanoTime();
+      Status res=_db.scan(table,startkey,recordcount,fields,result);
+      long en=System.nanoTime();
+      measure("SCAN", res, ist, st, en);
+      _measurements.reportStatus("SCAN", res);
+      return res;
+    }
   }
 
   private void measure(String op, Status result, long intendedStartTimeNanos,
@@ -181,13 +210,15 @@ public class DBWrapper extends DB
   public Status update(String table, String key,
       HashMap<String,ByteIterator> values)
   {
-    long ist=_measurements.getIntendedtartTimeNs();
-    long st = System.nanoTime();
-    Status res=_db.update(table,key,values);
-    long en=System.nanoTime();
-    measure("UPDATE", res, ist, st, en);
-    _measurements.reportStatus("UPDATE", res);
-    return res;
+    try (final TraceScope span = _tracer.newScope(SCOPE_STRING_UPDATE)) {
+      long ist=_measurements.getIntendedtartTimeNs();
+      long st = System.nanoTime();
+      Status res=_db.update(table,key,values);
+      long en=System.nanoTime();
+      measure("UPDATE", res, ist, st, en);
+      _measurements.reportStatus("UPDATE", res);
+      return res;
+    }
   }
 
   /**
@@ -203,13 +234,15 @@ public class DBWrapper extends DB
   public Status insert(String table, String key,
       HashMap<String,ByteIterator> values)
   {
-    long ist=_measurements.getIntendedtartTimeNs();
-    long st = System.nanoTime();
-    Status res=_db.insert(table,key,values);
-    long en=System.nanoTime();
-    measure("INSERT", res, ist, st, en);
-    _measurements.reportStatus("INSERT", res);
-    return res;
+    try (final TraceScope span = _tracer.newScope(SCOPE_STRING_INSERT)) {
+      long ist=_measurements.getIntendedtartTimeNs();
+      long st = System.nanoTime();
+      Status res=_db.insert(table,key,values);
+      long en=System.nanoTime();
+      measure("INSERT", res, ist, st, en);
+      _measurements.reportStatus("INSERT", res);
+      return res;
+    }
   }
 
   /**
@@ -221,12 +254,14 @@ public class DBWrapper extends DB
    */
   public Status delete(String table, String key)
   {
-    long ist=_measurements.getIntendedtartTimeNs();
-    long st = System.nanoTime();
-    Status res=_db.delete(table,key);
-    long en=System.nanoTime();
-    measure("DELETE", res, ist, st, en);
-    _measurements.reportStatus("DELETE", res);
-    return res;
+    try (final TraceScope span = _tracer.newScope(SCOPE_STRING_DELETE)) {
+      long ist=_measurements.getIntendedtartTimeNs();
+      long st = System.nanoTime();
+      Status res=_db.delete(table,key);
+      long en=System.nanoTime();
+      measure("DELETE", res, ist, st, en);
+      _measurements.reportStatus("DELETE", res);
+      return res;
+    }
   }
 }
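
Every operation in the wrapper now follows the same shape: open a per-operation trace scope, capture the intended and actual start times, call through to the underlying binding, then record the latency and status. A condensed sketch of that pattern; `timedRead` is a made-up helper name, since the real code inlines this body in each method:

```java
// Sketch of the wrap-measure-trace pattern shared by read/scan/update/insert/delete.
private Status timedRead(String table, String key, Set<String> fields,
    HashMap<String, ByteIterator> result) {
  try (final TraceScope span = _tracer.newScope(SCOPE_STRING_READ)) {
    long ist = _measurements.getIntendedtartTimeNs(); // scheduled start, for "Intended-" latencies
    long st = System.nanoTime();                      // actual start
    Status res = _db.read(table, key, fields, result);
    long en = System.nanoTime();
    measure("READ", res, ist, st, en);                // records both latency views
    _measurements.reportStatus("READ", res);
    return res;
  }
}
```
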
diff --git a/core/src/main/java/com/yahoo/ycsb/TerminatorThread.java b/core/src/main/java/com/yahoo/ycsb/TerminatorThread.java
index 62212f2c6dd0710f280c7516582440f0cd844d38..f15de5dfdaf2e4d9acb0d37d0cbf6f40e54d9be9 100644
--- a/core/src/main/java/com/yahoo/ycsb/TerminatorThread.java
+++ b/core/src/main/java/com/yahoo/ycsb/TerminatorThread.java
@@ -16,12 +16,11 @@
  */
 package com.yahoo.ycsb;
 
-import java.util.List;
-import java.util.Vector;
+import java.util.Collection;
 
 /**
  * A thread that waits for the maximum specified time and then interrupts all the client
- * threads passed as the Vector at initialization of this thread.
+ * threads passed at initialization of this thread.
  * 
  * The maximum execution time passed is assumed to be in seconds.
  * 
@@ -30,12 +29,12 @@ import java.util.Vector;
  */
 public class TerminatorThread extends Thread {
   
-  private final List<? extends Thread> threads;
+  private final Collection<? extends Thread> threads;
   private long maxExecutionTime;
   private Workload workload;
   private long waitTimeOutInMS;
   
-  public TerminatorThread(long maxExecutionTime, List<? extends Thread> threads, 
+  public TerminatorThread(long maxExecutionTime, Collection<? extends Thread> threads,
       Workload workload) {
     this.maxExecutionTime = maxExecutionTime;
     this.threads = threads;
diff --git a/core/src/main/java/com/yahoo/ycsb/Utils.java b/core/src/main/java/com/yahoo/ycsb/Utils.java
index 5fe699afdaa311d1953b4f2b1674edfcc345750d..05199fd7dd266ea09d9162df3c20369bd86f7d97 100644
--- a/core/src/main/java/com/yahoo/ycsb/Utils.java
+++ b/core/src/main/java/com/yahoo/ycsb/Utils.java
@@ -17,6 +17,12 @@
 
 package com.yahoo.ycsb;
 
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
+import java.lang.management.OperatingSystemMXBean;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.Random;
 
 /**
@@ -174,4 +180,94 @@ public class Utils
       public static byte[] doubleToBytes(final double val) {
         return longToBytes(Double.doubleToRawLongBits(val));
       }
+      
+      /**
+       * Measure the estimated active thread count in the current thread group.
+       * Since this calls {@link Thread#activeCount()} it should be called from the
+       * main thread or one started by the main thread. Threads included in the
+       * count can be in any state.
+       * For a more accurate count we could use the size of {@link Thread#getAllStackTraces()},
+       * but that freezes the JVM and incurs a high overhead.
+       * @return An estimated thread count, good for showing the thread count
+       * over time.
+       */
+      public static int getActiveThreadCount() {
+        return Thread.activeCount();
+      }
+      
+      /** @return The currently used memory in bytes */
+      public static long getUsedMemoryBytes() {
+        final Runtime runtime = Runtime.getRuntime();
+        return runtime.totalMemory() - runtime.freeMemory();
+      }
+      
+      /** @return The currently used memory in megabytes. */
+      public static int getUsedMemoryMegaBytes() {
+        return (int)(getUsedMemoryBytes() / 1024 / 1024);
+      }
+      
+      /** @return The current system load average if supported by the JDK. 
+       * If it's not supported, the value will be negative. */
+      public static double getSystemLoadAverage() {
+        final OperatingSystemMXBean osBean = 
+            ManagementFactory.getOperatingSystemMXBean();
+        return osBean.getSystemLoadAverage();
+      }
+      
+      /** @return The total number of garbage collections executed for all 
+       * memory pools. */ 
+      public static long getGCTotalCollectionCount() {
+        final List<GarbageCollectorMXBean> gcBeans = 
+            ManagementFactory.getGarbageCollectorMXBeans();
+        long count = 0;
+        for (final GarbageCollectorMXBean bean : gcBeans) {
+          if (bean.getCollectionCount() < 0) {
+            continue;
+          }
+          count += bean.getCollectionCount();
+        }
+        return count;
+      }
+      
+      /** @return The total time, in milliseconds, spent in GC. */ 
+      public static long getGCTotalTime() {
+        final List<GarbageCollectorMXBean> gcBeans = 
+            ManagementFactory.getGarbageCollectorMXBeans();
+        long time = 0;
+        for (final GarbageCollectorMXBean bean : gcBeans) {
+          if (bean.getCollectionTime() < 0) {
+            continue;
+          }
+          time += bean.getCollectionTime();
+        }
+        return time;
+      }
+
+      /**
+       * Returns a map of garbage collectors and their stats.
+       * The first object in the array is the total count since JVM start and the
+       * second is the total time (ms) since JVM start. 
+       * If a garbage collector does not support the collector MXBean, then it
+       * will not be represented in the map.
+       * @return A non-null map of garbage collectors and their metrics. The map
+       * may be empty.
+       */ 
+      public static Map<String, Long[]> getGCStats() {
+        final List<GarbageCollectorMXBean> gcBeans = 
+            ManagementFactory.getGarbageCollectorMXBeans();
+        final Map<String, Long[]> map = new HashMap<String, Long[]>(gcBeans.size());
+        for (final GarbageCollectorMXBean bean : gcBeans) {
+          if (!bean.isValid() || bean.getCollectionCount() < 0 || 
+              bean.getCollectionTime() < 0) {
+            continue;
+          }
+          
+          final Long[] measurements = new Long[] {
+              bean.getCollectionCount(),
+              bean.getCollectionTime()
+          };
+          map.put(bean.getName().replace(" ", "_"), measurements);
+        }
+        return map;
+      }
 }
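
All of the new helpers are thin wrappers over `Runtime` and the `java.lang.management` MXBeans, so they can be exercised standalone. A small sketch that prints the same numbers the status thread records (the class name is illustrative):

```java
import java.util.Map;

import com.yahoo.ycsb.Utils;

/** Illustrative sketch exercising the new JVM-stat helpers in Utils. */
public class JvmStatsSketch {
  public static void main(String[] args) {
    System.out.println("threads:    " + Utils.getActiveThreadCount());
    System.out.println("used MB:    " + Utils.getUsedMemoryMegaBytes());
    // A negative value means this JVM does not expose a load average.
    System.out.println("load avg:   " + Utils.getSystemLoadAverage());
    System.out.println("gc count:   " + Utils.getGCTotalCollectionCount());
    System.out.println("gc time ms: " + Utils.getGCTotalTime());

    // Per-collector totals: [0] = collections, [1] = time (ms) since JVM start.
    for (final Map.Entry<String, Long[]> gc : Utils.getGCStats().entrySet()) {
      System.out.println(gc.getKey() + ": count=" + gc.getValue()[0]
          + " timeMs=" + gc.getValue()[1]);
    }
  }
}
```
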
diff --git a/core/src/main/java/com/yahoo/ycsb/Workload.java b/core/src/main/java/com/yahoo/ycsb/Workload.java
index 6b3e8ba74142c22a003cb676189898f38f27b01c..7bebafbf3f913acea21082535cabf2c3f1a237d5 100644
--- a/core/src/main/java/com/yahoo/ycsb/Workload.java
+++ b/core/src/main/java/com/yahoo/ycsb/Workload.java
@@ -1,24 +1,25 @@
-/**                                                                                                                                                                                
- * Copyright (c) 2010 Yahoo! Inc. All rights reserved.                                                                                                                             
- *                                                                                                                                                                                 
- * Licensed under the Apache License, Version 2.0 (the "License"); you                                                                                                             
- * may not use this file except in compliance with the License. You                                                                                                                
- * may obtain a copy of the License at                                                                                                                                             
- *                                                                                                                                                                                 
- * http://www.apache.org/licenses/LICENSE-2.0                                                                                                                                      
- *                                                                                                                                                                                 
- * Unless required by applicable law or agreed to in writing, software                                                                                                             
- * distributed under the License is distributed on an "AS IS" BASIS,                                                                                                               
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or                                                                                                                 
- * implied. See the License for the specific language governing                                                                                                                    
- * permissions and limitations under the License. See accompanying                                                                                                                 
- * LICENSE file.                                                                                                                                                                   
+/**
+ * Copyright (c) 2010 Yahoo! Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
  */
 
 package com.yahoo.ycsb;
 
-import java.util.Properties;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.Properties;
+
 
 /**
  * One experiment scenario. One object of this type will
@@ -27,86 +28,86 @@ import java.util.concurrent.atomic.AtomicBoolean;
  * load it dynamically. Any argument-based initialization should be
  * done by init().
  * 
- * If you extend this class, you should support the "insertstart" property. This 
- * allows the load phase to proceed from multiple clients on different machines, in case
+ * If you extend this class, you should support the "insertstart" property. This
+ * allows the load phase to proceed from multiple clients on different machines, in case
  * the client is the bottleneck. For example, if we want to load 1 million records from
  * 2 machines, the first machine should have insertstart=0 and the second insertstart=500000. Additionally,
  * the "insertcount" property, which is interpreted by Client, can be used to tell each instance of the
  * client how many inserts to do. In the example above, both clients should have insertcount=500000.
  */
-public abstract class Workload
-{
-	public static final String INSERT_START_PROPERTY="insertstart";
-	
-	public static final String INSERT_START_PROPERTY_DEFAULT="0";
-	
-	private volatile AtomicBoolean stopRequested = new AtomicBoolean(false);
-	
-      /**
-       * Initialize the scenario. Create any generators and other shared objects here.
-       * Called once, in the main client thread, before any operations are started.
-       */
-      public void init(Properties p) throws WorkloadException
-      {
-      }
+public abstract class Workload {
+  public static final String INSERT_START_PROPERTY = "insertstart";
+  public static final String INSERT_COUNT_PROPERTY = "insertcount";
+  
+  public static final String INSERT_START_PROPERTY_DEFAULT = "0";
+  
+  private volatile AtomicBoolean stopRequested = new AtomicBoolean(false);
+  
+  /**
+   * Initialize the scenario. Create any generators and other shared objects here.
+   * Called once, in the main client thread, before any operations are started.
+   */
+  public void init(Properties p) throws WorkloadException {
+  }
 
-      /**
-       * Initialize any state for a particular client thread. Since the scenario object
-       * will be shared among all threads, this is the place to create any state that is specific
-       * to one thread. To be clear, this means the returned object should be created anew on each
-       * call to initThread(); do not return the same object multiple times. 
-       * The returned object will be passed to invocations of doInsert() and doTransaction() 
-       * for this thread. There should be no side effects from this call; all state should be encapsulated
-       * in the returned object. If you have no state to retain for this thread, return null. (But if you have
-       * no state to retain for this thread, probably you don't need to override initThread().)
-       * 
-       * @return false if the workload knows it is done for this thread. Client will terminate the thread. Return true otherwise. Return true for workloads that rely on operationcount. For workloads that read traces from a file, return true when there are more to do, false when you are done.
-       */
-      public Object initThread(Properties p, int mythreadid, int threadcount) throws WorkloadException
-      {
-	 return null;
-      }
-      
-      /**
-       * Cleanup the scenario. Called once, in the main client thread, after all operations have completed.
-       */
-      public void cleanup() throws WorkloadException
-      {
-      }
-      
-      /**
-       * Do one insert operation. Because it will be called concurrently from multiple client threads, this 
-       * function must be thread safe. However, avoid synchronized, or the threads will block waiting for each 
-       * other, and it will be difficult to reach the target throughput. Ideally, this function would have no side
-       * effects other than DB operations and mutations on threadstate. Mutations to threadstate do not need to be
-       * synchronized, since each thread has its own threadstate instance.
-       */
-      public abstract boolean doInsert(DB db, Object threadstate);
+  /**
+   * Initialize any state for a particular client thread. Since the scenario object
+   * will be shared among all threads, this is the place to create any state that is specific
+   * to one thread. To be clear, this means the returned object should be created anew on each
+   * call to initThread(); do not return the same object multiple times.
+   * The returned object will be passed to invocations of doInsert() and doTransaction()
+   * for this thread. There should be no side effects from this call; all state should be encapsulated
+   * in the returned object. If you have no state to retain for this thread, return null. (But if you have
+   * no state to retain for this thread, probably you don't need to override initThread().)
+   * 
+   * @return false if the workload knows it is done for this thread. Client will terminate the thread.
+   * Return true otherwise. Return true for workloads that rely on operationcount. For workloads that read
+   * traces from a file, return true when there are more to do, false when you are done.
+   */
+  public Object initThread(Properties p, int mythreadid, int threadcount) throws WorkloadException {
+    return null;
+  }
       
-      /**
-       * Do one transaction operation. Because it will be called concurrently from multiple client threads, this 
-       * function must be thread safe. However, avoid synchronized, or the threads will block waiting for each 
-       * other, and it will be difficult to reach the target throughput. Ideally, this function would have no side
-       * effects other than DB operations and mutations on threadstate. Mutations to threadstate do not need to be
-       * synchronized, since each thread has its own threadstate instance.
-       * 
-       * @return false if the workload knows it is done for this thread. Client will terminate the thread. Return true otherwise. Return true for workloads that rely on operationcount. For workloads that read traces from a file, return true when there are more to do, false when you are done.
-       */
-      public abstract boolean doTransaction(DB db, Object threadstate);
-      
-      /**
-       * Allows scheduling a request to stop the workload.
-       */
-      public void requestStop() {
-        stopRequested.set(true);
-      }
-      
-      /**
-       * Check the status of the stop request flag.
-       * @return true if stop was requested, false otherwise.
-       */
-      public boolean isStopRequested() {
-        if (stopRequested.get() == true) return true;
-        else return false;
-      }
+  /**
+   * Cleanup the scenario. Called once, in the main client thread, after all operations have completed.
+   */
+  public void cleanup() throws WorkloadException {
+  }
+
+  /**
+   * Do one insert operation. Because it will be called concurrently from multiple client threads, this
+   * function must be thread safe. However, avoid synchronized, or the threads will block waiting for each
+   * other, and it will be difficult to reach the target throughput. Ideally, this function would have no side
+   * effects other than DB operations and mutations on threadstate. Mutations to threadstate do not need to be
+   * synchronized, since each thread has its own threadstate instance.
+   */
+  public abstract boolean doInsert(DB db, Object threadstate);
+
+  /**
+   * Do one transaction operation. Because it will be called concurrently from multiple client threads, this
+   * function must be thread safe. However, avoid synchronized, or the threads will block waiting for each
+   * other, and it will be difficult to reach the target throughput. Ideally, this function would have no side
+   * effects other than DB operations and mutations on threadstate. Mutations to threadstate do not need to be
+   * synchronized, since each thread has its own threadstate instance.
+   * 
+   * @return false if the workload knows it is done for this thread. Client will terminate the thread. 
+   * Return true otherwise. Return true for workloads that rely on operationcount. For workloads that read
+   * traces from a file, return true when there are more to do, false when you are done.
+   */
+  public abstract boolean doTransaction(DB db, Object threadstate);
+
+  /**
+   * Allows scheduling a request to stop the workload.
+   */
+  public void requestStop() {
+    stopRequested.set(true);
+  }
+
+  /**
+   * Check the status of the stop request flag.
+   * @return true if stop was requested, false otherwise.
+   */
+  public boolean isStopRequested() {
+    return stopRequested.get();
+  }
 }
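
For authors extending the reformatted class, the contract is easiest to see in a trivial subclass. A hedged, minimal example; the class name, field value, and record key below are made up, and `usertable` is only the conventional default table name:

```java
import java.util.HashMap;
import java.util.Properties;

import com.yahoo.ycsb.ByteIterator;
import com.yahoo.ycsb.DB;
import com.yahoo.ycsb.StringByteIterator;
import com.yahoo.ycsb.Workload;
import com.yahoo.ycsb.WorkloadException;

/** Illustrative only: inserts and reads a single hard-coded record. */
public class MinimalWorkload extends Workload {
  private String table;

  @Override
  public void init(Properties p) throws WorkloadException {
    // Shared, read-only state is safe here; per-thread state belongs in initThread().
    table = p.getProperty("table", "usertable");
  }

  @Override
  public boolean doInsert(DB db, Object threadstate) {
    final HashMap<String, ByteIterator> values = new HashMap<String, ByteIterator>();
    values.put("field0", new StringByteIterator("value0"));
    db.insert(table, "user0", values);
    return true; // keep going; the Client stops after insertcount operations
  }

  @Override
  public boolean doTransaction(DB db, Object threadstate) {
    db.read(table, "user0", null, new HashMap<String, ByteIterator>());
    return true; // keep going; the Client stops after operationcount operations
  }
}
```
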
diff --git a/core/src/main/java/com/yahoo/ycsb/generator/IncrementingPrintableStringGenerator.java b/core/src/main/java/com/yahoo/ycsb/generator/IncrementingPrintableStringGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..82406f020a1a64c37b2c2cc0891da046c52120a8
--- /dev/null
+++ b/core/src/main/java/com/yahoo/ycsb/generator/IncrementingPrintableStringGenerator.java
@@ -0,0 +1,389 @@
+/**
+ * Copyright (c) 2016 YCSB contributors. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+package com.yahoo.ycsb.generator;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Set;
+
+/**
+ * A generator that produces strings of {@link #length} using a set of code points
+ * from {@link #characterSet}. Each time {@link #nextValue()} is executed, the string
+ * is incremented by one character. Eventually the string may roll over to the beginning,
+ * and the user may choose to have the generator throw a NoSuchElementException at that
+ * point or continue incrementing. (By default the generator will continue incrementing.)
+ * <p>
+ * For example, if we set a length of 2 characters and the character set includes
+ * [A, B] then the generator output will be:
+ * <ul>
+ * <li>AA</li>
+ * <li>AB</li>
+ * <li>BA</li>
+ * <li>BB</li>
+ * <li>AA <-- rolled over</li>
+ * </ul>
+ * <p>
+ * This class includes some default character sets to choose from, including ASCII
+ * and plane 0 of Unicode (the Basic Multilingual Plane).
+ */
+public class IncrementingPrintableStringGenerator extends Generator<String> {
+
+  /** Default string length for the generator. */
+  public static final int DEFAULTSTRINGLENGTH = 8;
+  
+  /**
+   * Set of all character types that include every symbol other than non-printable
+   * control characters.
+   */
+  public static final Set<Integer> CHAR_TYPES_ALL_BUT_CONTROL;
+  static {
+    CHAR_TYPES_ALL_BUT_CONTROL = new HashSet<Integer>(24);
+    // numbers
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.DECIMAL_DIGIT_NUMBER);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.LETTER_NUMBER);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.OTHER_NUMBER);
+    
+    // letters
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.UPPERCASE_LETTER);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.LOWERCASE_LETTER);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.TITLECASE_LETTER);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.OTHER_LETTER);
+    
+    // marks
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.COMBINING_SPACING_MARK);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.NON_SPACING_MARK);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.ENCLOSING_MARK);
+    
+    // punctuation
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.CONNECTOR_PUNCTUATION);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.DASH_PUNCTUATION);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.START_PUNCTUATION);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.END_PUNCTUATION);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.INITIAL_QUOTE_PUNCTUATION);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.FINAL_QUOTE_PUNCTUATION);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.OTHER_PUNCTUATION);
+    
+    // symbols
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.MATH_SYMBOL);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.CURRENCY_SYMBOL);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.MODIFIER_SYMBOL);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.OTHER_SYMBOL);
+    
+    // separators
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.SPACE_SEPARATOR);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.LINE_SEPARATOR);
+    CHAR_TYPES_ALL_BUT_CONTROL.add((int)Character.PARAGRAPH_SEPARATOR);
+  }
+  
+  /**
+   * Set of character types including only upper and lower case letters.
+   */
+  public static final Set<Integer> CHAR_TYPES_BASIC_ALPHA;
+  static {
+    CHAR_TYPES_BASIC_ALPHA = new HashSet<Integer>(2);
+    CHAR_TYPES_BASIC_ALPHA.add((int)Character.UPPERCASE_LETTER);
+    CHAR_TYPES_BASIC_ALPHA.add((int)Character.LOWERCASE_LETTER);
+  }
+  
+  /**
+   * Set of character types including only decimals, upper and lower case letters.
+   */
+  public static final Set<Integer> CHAR_TYPES_BASIC_ALPHANUMERICS;
+  static {
+    CHAR_TYPES_BASIC_ALPHANUMERICS = new HashSet<Integer>(3);
+    CHAR_TYPES_BASIC_ALPHANUMERICS.add((int)Character.DECIMAL_DIGIT_NUMBER);
+    CHAR_TYPES_BASIC_ALPHANUMERICS.add((int)Character.UPPERCASE_LETTER);
+    CHAR_TYPES_BASIC_ALPHANUMERICS.add((int)Character.LOWERCASE_LETTER);
+  }
+  
+  /**
+   * Set of character types including only decimals, letter numbers, 
+   * other numbers, upper, lower, title case as well as letter modifiers 
+   * and other letters.
+   */
+  public static final Set<Integer> CHAR_TYPE_EXTENDED_ALPHANUMERICS;
+  static {
+    CHAR_TYPE_EXTENDED_ALPHANUMERICS = new HashSet<Integer>(8);
+    CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int)Character.DECIMAL_DIGIT_NUMBER);
+    CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int)Character.LETTER_NUMBER);
+    CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int)Character.OTHER_NUMBER);
+    CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int)Character.UPPERCASE_LETTER);
+    CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int)Character.LOWERCASE_LETTER);
+    CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int)Character.TITLECASE_LETTER);
+    CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int)Character.MODIFIER_LETTER);
+    CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int)Character.OTHER_LETTER);
+  }
+  
+  /** The character set to iterate over. */
+  private final int[] characterSet;
+  
+  /** An array of indices, each matching a position in the output string. */
+  private int[] indices; 
+  
+  /** The length of the output string in characters. */
+  private final int length;
+  
+  /** The last value returned by the generator. Should be null if {@link #nextValue()}
+   * has not been called. */
+  private String lastValue;
+  
+  /** Whether or not to throw an exception when the string rolls over. */
+  private boolean throwExceptionOnRollover;
+  
+  /** Whether or not the generator has rolled over. */
+  private boolean hasRolledOver;
+  
+  /**
+   * Generates strings of 8 characters using only the upper and lower case alphabetical
+   * characters from the ASCII set. 
+   */
+  public IncrementingPrintableStringGenerator() {
+    this(DEFAULTSTRINGLENGTH, printableBasicAlphaASCIISet());
+  }
+  
+  /**
+   * Generates strings of {@link #length} characters using only the upper and lower 
+   * case alphabetical characters from the ASCII set. 
+   * @param length The length of string to return from the generator.
+   * @throws IllegalArgumentException if the length is less than one.
+   */
+  public IncrementingPrintableStringGenerator(final int length) {
+    this(length, printableBasicAlphaASCIISet());
+  }
+  
+  /**
+   * Generates strings of {@link #length} characters using the code points in
+   * {@link #characterSet}.
+   * @param length The length of string to return from the generator.
+   * @param characterSet A set of code points to choose from. Code points in the 
+   * set can be in any order, not necessarily lexical.
+   * @throws IllegalArgumentException if the length is less than one or the character
+   * set has fewer than one code points.
+   */
+  public IncrementingPrintableStringGenerator(final int length, final int[] characterSet) {
+    if (length < 1) {
+      throw new IllegalArgumentException("Length must be greater than or equal to 1");
+    }
+    if (characterSet == null || characterSet.length < 1) {
+      throw new IllegalArgumentException("Character set must have at least one character");
+    }
+    this.length = length;
+    this.characterSet = characterSet;
+    indices = new int[length];
+  }
+  
+  @Override
+  public String nextValue() {
+    if (hasRolledOver && throwExceptionOnRollover) {
+      throw new NoSuchElementException("The generator has rolled over to the beginning");
+    }
+    
+    final StringBuilder buffer = new StringBuilder(length);
+    for (int i = 0; i < length; i++) {
+      buffer.append(Character.toChars(characterSet[indices[i]]));
+    }
+    
+    // increment the indices, least-significant position first
+    for (int i = length - 1; i >= 0; --i) {
+      if (indices[i] >= characterSet.length - 1) {
+        indices[i] = 0;
+        if (i == 0 || (characterSet.length == 1 && lastValue != null)) {
+          hasRolledOver = true;
+        }
+      } else {
+        ++indices[i];
+        break;
+      }
+    }
+    
+    lastValue = buffer.toString();
+    return lastValue;
+  }
+
+  @Override
+  public String lastValue() {
+    return lastValue;
+  }
+
+  /** @param exceptionOnRollover Whether or not to throw an exception on rollover. */
+  public void setThrowExceptionOnRollover(final boolean exceptionOnRollover) {
+    this.throwExceptionOnRollover = exceptionOnRollover;
+  }
+  
+  /** @return Whether or not to throw an exception on rollover. */
+  public boolean getThrowExceptionOnRollover() {
+    return throwExceptionOnRollover;
+  }
+  
+  /**
+   * Returns an array of printable code points with only the upper and lower
+   * case alphabetical characters from the basic ASCII set.
+   * @return An array of code points
+   */
+  public static int[] printableBasicAlphaASCIISet() {
+    final List<Integer> validCharacters = 
+        generatePrintableCharacterSet(0, 127, null, false, CHAR_TYPES_BASIC_ALPHA);
+    final int[] characterSet = new int[validCharacters.size()];
+    for (int i = 0; i < validCharacters.size(); i++) {
+      characterSet[i] = validCharacters.get(i);
+    }
+    return characterSet;
+  }
+  
+  /**
+   * Returns an array of printable code points with the upper and lower case 
+   * alphabetical characters as well as the numeric values from the basic 
+   * ASCII set.
+   * @return An array of code points
+   */
+  public static int[] printableBasicAlphaNumericASCIISet() {
+    final List<Integer> validCharacters = 
+        generatePrintableCharacterSet(0, 127, null, false, CHAR_TYPES_BASIC_ALPHANUMERICS);
+    final int[] characterSet = new int[validCharacters.size()];
+    for (int i = 0; i < validCharacters.size(); i++) {
+      characterSet[i] = validCharacters.get(i);
+    }
+    return characterSet;
+  }
+  
+  /**
+   * Returns an array of printable code points with the entire basic ASCII table,
+   * including spaces. Excludes new lines.
+   * @return An array of code points
+   */
+  public static int[] fullPrintableBasicASCIISet() {
+    final List<Integer> validCharacters = 
+        generatePrintableCharacterSet(32, 127, null, false, null);
+    final int[] characterSet = new int[validCharacters.size()];
+    for (int i = 0; i < validCharacters.size(); i++) {
+      characterSet[i] = validCharacters.get(i);
+    }
+    return characterSet;
+  }
+  
+  /**
+   * Returns an array of printable code points with the entire basic ASCII table,
+   * including spaces and new lines.
+   * @return An array of code points
+   */
+  public static int[] fullPrintableBasicASCIISetWithNewlines() {
+    final List<Integer> validCharacters = new ArrayList<Integer>();
+    validCharacters.add(10); // newline
+    validCharacters.addAll(generatePrintableCharacterSet(32, 127, null, false, null));
+    final int[] characterSet = new int[validCharacters.size()];
+    for (int i = 0; i < validCharacters.size(); i++) {
+      characterSet[i] = validCharacters.get(i);
+    }
+    return characterSet;
+  }
+  
+  /**
+   * Returns an array of printable code points from the first plane of Unicode
+   * characters, including only the alphanumeric values.
+   * @return An array of code points
+   */
+  public static int[] printableAlphaNumericPlaneZeroSet() {
+    final List<Integer> validCharacters = 
+        generatePrintableCharacterSet(0, 65535, null, false, CHAR_TYPES_BASIC_ALPHANUMERICS);
+    final int[] characterSet = new int[validCharacters.size()];
+    for (int i = 0; i < validCharacters.size(); i++) {
+      characterSet[i] = validCharacters.get(i);
+    }
+    return characterSet;
+  }
+  
+  /**
+   * Returns an array of printable code points from the first plane of Unicode
+   * characters, including all printable characters.
+   * @return An array of code points
+   */
+  public static int[] fullPrintablePlaneZeroSet() {
+    final List<Integer> validCharacters = 
+        generatePrintableCharacterSet(0, 65535, null, false, CHAR_TYPES_ALL_BUT_CONTROL);
+    final int[] characterSet = new int[validCharacters.size()];
+    for (int i = 0; i < validCharacters.size(); i++) {
+      characterSet[i] = validCharacters.get(i);
+    }
+    return characterSet;
+  }
+  
+  /**
+   * Generates a list of code points based on a range and filters.
+   * These can be used for generating strings with various ASCII and/or
+   * Unicode printable character sets for use with DBs that may have 
+   * character limitations.
+   * <p>
+   * Note that control, surrogate, format, private use and unassigned 
+   * code points are skipped.
+   * @param startCodePoint The starting code point, inclusive.
+   * @param lastCodePoint The final code point, inclusive.
+   * @param characterTypesFilter An optional set of code points for inclusion
+   * or exclusion, interpreted according to {@code isFilterAllowableList}.
+   * @param isFilterAllowableList Determines whether the {@code characterTypesFilter}
+   * set is inclusive or exclusive. When true, only those code points that
+   * appear in the set will be included in the resulting list. Otherwise
+   * matching code points are excluded.
+   * @param allowableTypes An optional set of allowable character types. See
+   * {@link Character} for types.
+   * @return A list of code points matching the given range and filters. The
+   * list may be empty but is guaranteed not to be null.
+   */
+  public static List<Integer> generatePrintableCharacterSet(
+      final int startCodePoint,
+      final int lastCodePoint, 
+      final Set<Integer> characterTypesFilter,
+      final boolean isFilterAllowableList,
+      final Set<Integer> allowableTypes) {
+    
+    // since we don't know the final size of the allowable character list we
+    // start with a list then we'll flatten it to an array.
+    final List<Integer> validCharacters = new ArrayList<Integer>(lastCodePoint);
+    
+    for (int codePoint = startCodePoint; codePoint <= lastCodePoint; ++codePoint) {
+      if (allowableTypes != null && 
+          !allowableTypes.contains(Character.getType(codePoint))) {
+        continue;
+      } else {
+        // skip control points, formats, surrogates, etc
+        final int type = Character.getType(codePoint);
+        if (type == Character.CONTROL ||
+            type == Character.SURROGATE ||
+            type == Character.FORMAT ||
+            type == Character.PRIVATE_USE ||
+            type == Character.UNASSIGNED) {
+          continue;
+        }
+      }
+      
+      if (characterTypesFilter != null) {
+        // if the filter is enabled then we need to make sure the code point 
+        // is in the allowable list if it's a whitelist or that the code point
+        // is NOT in the list if it's a blacklist.
+        if ((isFilterAllowableList && !characterTypesFilter.contains(codePoint)) ||
+            (!isFilterAllowableList && characterTypesFilter.contains(codePoint))) {
+          continue;
+        }
+      }
+      
+      validCharacters.add(codePoint);
+    }
+    return validCharacters;
+  }
+  
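+  // Illustrative sketch (hypothetical helper, not referenced anywhere in
+  // YCSB): build a custom character set of basic ASCII letters and hand it to
+  // the generator, mirroring what printableBasicAlphaASCIISet() does.
+  private static IncrementingPrintableStringGenerator exampleLetterGenerator() {
+    final List<Integer> letters =
+        generatePrintableCharacterSet(0, 127, null, false, CHAR_TYPES_BASIC_ALPHA);
+    final int[] characterSet = new int[letters.size()];
+    for (int i = 0; i < letters.size(); i++) {
+      characterSet[i] = letters.get(i);
+    }
+    return new IncrementingPrintableStringGenerator(4, characterSet);
+  }
+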
+}
diff --git a/core/src/main/java/com/yahoo/ycsb/generator/SequentialGenerator.java b/core/src/main/java/com/yahoo/ycsb/generator/SequentialGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..4d53385e312a5c8ab30a3a955643deb00a4b7bbc
--- /dev/null
+++ b/core/src/main/java/com/yahoo/ycsb/generator/SequentialGenerator.java
@@ -0,0 +1,65 @@
+/**
+ * Copyright (c) 2016 YCSB Contributors All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+
+package com.yahoo.ycsb.generator;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Generates a sequence of integers, cycling from countstart to countend (inclusive).
+ */
+public class SequentialGenerator extends NumberGenerator {
+  final AtomicInteger counter;
+  int _interval, _countstart;
+
+  /**
+   * Creates a counter that starts at countstart and wraps around after countend.
+   */
+  public SequentialGenerator(int countstart, int countend) {
+    counter = new AtomicInteger();
+    setLastValue(counter.get());
+    _countstart = countstart;
+    _interval = countend - countstart + 1;
+  }
+  
+  /**
+   * Returns the next value in the sequence as a primitive int.
+   */
+  public int nextInt() {
+    int ret = _countstart + counter.getAndIncrement() % _interval;
+    setLastValue(ret);
+    return ret;
+  }
+  
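+  // Illustrative sketch (comment only): new SequentialGenerator(10, 12)
+  // produces 10, 11, 12, 10, 11, ... cycling over the inclusive
+  // [countstart, countend] range via the modulo on the shared counter.
+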
+  @Override
+  public Number nextValue() {
+    int ret = _countstart + counter.getAndIncrement() % _interval;
+    setLastValue(ret);
+    return ret;
+  }
+  
+  @Override
+  public Number lastValue() {
+    // Return the value most recently produced by nextValue()/nextInt(), which
+    // NumberGenerator tracks via setLastValue(), rather than the raw counter.
+    return super.lastValue();
+  }
+  
+  @Override
+  public double mean() {
+    throw new UnsupportedOperationException("Can't compute mean of non-stationary distribution!");
+  }
+}
diff --git a/core/src/main/java/com/yahoo/ycsb/measurements/Measurements.java b/core/src/main/java/com/yahoo/ycsb/measurements/Measurements.java
index fe1e9cc16707ee8b3dce8855f2a0d4a95995c85c..26d340cc4ce8241a5d276a28e95aeb3a241753a8 100644
--- a/core/src/main/java/com/yahoo/ycsb/measurements/Measurements.java
+++ b/core/src/main/java/com/yahoo/ycsb/measurements/Measurements.java
@@ -49,6 +49,9 @@ public class Measurements {
   
   public static final String MEASUREMENT_INTERVAL = "measurement.interval";
   private static final String MEASUREMENT_INTERVAL_DEFAULT = "op";
+  
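+  // When "measurement.trackjvm" is set to "true" (e.g. "-p measurement.trackjvm=true"
+  // on the command line), YCSB additionally records JVM statistics such as
+  // thread counts, used memory and GC activity alongside workload measurements.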
+  public static final String MEASUREMENT_TRACK_JVM_PROPERTY = "measurement.trackjvm";
+  public static final String MEASUREMENT_TRACK_JVM_PROPERTY_DEFAULT = "false";
 
   static Measurements singleton=null;
   static Properties measurementproperties=null;
diff --git a/core/src/main/java/com/yahoo/ycsb/workloads/CoreWorkload.java b/core/src/main/java/com/yahoo/ycsb/workloads/CoreWorkload.java
index ad127d8b464aaf059f08495834b7cd257afde8bc..b9ff7e734cf3b3224511de51e4f1b16253f00ff7 100644
--- a/core/src/main/java/com/yahoo/ycsb/workloads/CoreWorkload.java
+++ b/core/src/main/java/com/yahoo/ycsb/workloads/CoreWorkload.java
@@ -1,5 +1,5 @@
 /**
- * Copyright (c) 2010 Yahoo! Inc. All rights reserved.
+ * Copyright (c) 2010 Yahoo! Inc., Copyright (c) 2016 YCSB contributors. All rights reserved.
  *
  * Licensed under the Apache License, Version 2.0 (the "License"); you
  * may not use this file except in compliance with the License. You
@@ -21,26 +21,28 @@ import java.util.Properties;
 
 import com.yahoo.ycsb.*;
 import com.yahoo.ycsb.generator.AcknowledgedCounterGenerator;
+import com.yahoo.ycsb.generator.ConstantIntegerGenerator;
 import com.yahoo.ycsb.generator.CounterGenerator;
 import com.yahoo.ycsb.generator.DiscreteGenerator;
 import com.yahoo.ycsb.generator.ExponentialGenerator;
-import com.yahoo.ycsb.generator.ConstantIntegerGenerator;
-import com.yahoo.ycsb.generator.HotspotIntegerGenerator;
 import com.yahoo.ycsb.generator.HistogramGenerator;
+import com.yahoo.ycsb.generator.HotspotIntegerGenerator;
 import com.yahoo.ycsb.generator.NumberGenerator;
 import com.yahoo.ycsb.generator.ScrambledZipfianGenerator;
+import com.yahoo.ycsb.generator.SequentialGenerator;
 import com.yahoo.ycsb.generator.SkewedLatestGenerator;
 import com.yahoo.ycsb.generator.UniformIntegerGenerator;
 import com.yahoo.ycsb.generator.ZipfianGenerator;
 import com.yahoo.ycsb.measurements.Measurements;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Vector;
 import java.util.List;
 import java.util.Map;
-import java.util.ArrayList;
+import java.util.Vector;
+
 
 /**
  * The core benchmark scenario. Represents a set of clients doing simple CRUD operations. The
@@ -61,10 +63,19 @@ import java.util.ArrayList;
  * <LI><b>readmodifywriteproportion</b>: what proportion of operations should be read a record,
  * modify it, write it back (default: 0)
  * <LI><b>requestdistribution</b>: what distribution should be used to select the records to operate
- * on - uniform, zipfian, hotspot, or latest (default: uniform)
+ * on - uniform, zipfian, hotspot, sequential, exponential or latest (default: uniform)
  * <LI><b>maxscanlength</b>: for scans, what is the maximum number of records to scan (default: 1000)
  * <LI><b>scanlengthdistribution</b>: for scans, what distribution should be used to choose the
  * number of records to scan, for each scan, between 1 and maxscanlength (default: uniform)
+ * <LI><b>insertstart</b>: for parallel loads and runs, defines the starting record for this
+ * YCSB instance (default: 0)
+ * <LI><b>insertcount</b>: for parallel loads and runs, defines the number of records for this
+ * YCSB instance (default: recordcount)
+ * <LI><b>zeropadding</b>: for generating a record sequence compatible with string sort order by
+ * 0 padding the record number. Controls the number of 0s to use for padding. (default: 1)
+ * For example, for row 5, zeropadding=1 yields the key 'user5' while zeropadding=8 yields
+ * 'user00000005'. To have any effect, zeropadding needs to be bigger than the number of
+ * digits in the record number.
  * <LI><b>insertorder</b>: should records be inserted in order by key ("ordered"), or in hashed
  * order ("hashed") (default: hashed)
  * </ul>
@@ -99,11 +110,11 @@ public class CoreWorkload extends Workload {
 
   /**
    * The name of the property for the field length distribution. Options are "uniform", "zipfian"
-   * (favoring short records), "constant", and "histogram".
+   * (favoring short records), "constant", and "histogram".
    *
    * If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
-   * fieldlength property.  If "histogram", then the
-   * histogram will be read from the filename specified in the "fieldlengthhistogram" property.
+   * fieldlength property. If "histogram", then the histogram will be read from the filename
+   * specified in the "fieldlengthhistogram" property.
    */
   public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
 
@@ -240,12 +251,24 @@ public class CoreWorkload extends Workload {
   public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
 
   /**
-   * The default distribution of requests across the keyspace
+   * The default distribution of requests across the keyspace.
    */
   public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
 
+  /**
+   * The name of the property for adding zero padding to record numbers in order to match
+   * string sort order. Controls the number of 0s to left pad with.
+   */
+  public static final String ZERO_PADDING_PROPERTY = "zeropadding";
+
   /**
-   * The name of the property for the max scan length (number of records)
+   * The default zero padding value, which matches integer sort order.
+   */
+  public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
+
+  /**
+   * The name of the property for the max scan length (number of records).
    */
   public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
 
@@ -322,6 +345,7 @@ public class CoreWorkload extends Workload {
   boolean orderedinserts;
 
   int recordcount;
+  int zeropadding;
 
   int insertionRetryLimit;
   int insertionRetryInterval;
@@ -371,21 +395,12 @@ public class CoreWorkload extends Workload {
       fieldnames.add("field" + i);
     }
     fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
-
-    double readproportion = Double.parseDouble(
-        p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
-    double updateproportion = Double.parseDouble(
-        p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
-    double insertproportion = Double.parseDouble(
-        p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
-    double scanproportion = Double.parseDouble(
-        p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
-    double readmodifywriteproportion = Double.parseDouble(p.getProperty(
-        READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
+    
     recordcount =
         Integer.parseInt(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
-    if (recordcount == 0)
+    if (recordcount == 0) {
       recordcount = Integer.MAX_VALUE;
+    }
     String requestdistrib =
         p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
     int maxscanlength =
@@ -395,6 +410,16 @@ public class CoreWorkload extends Workload {
 
     int insertstart =
         Integer.parseInt(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
+    int insertcount =
+        Integer.parseInt(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
+    // Confirm valid values for insertstart and insertcount in relation to recordcount
+    if (recordcount < (insertstart + insertcount)) {
+      System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
+      System.err.println("recordcount must be bigger than insertstart + insertcount.");
+      System.exit(-1);
+    }
+    zeropadding =
+        Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
 
     readallfields = Boolean.parseBoolean(
         p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
@@ -405,16 +430,14 @@ public class CoreWorkload extends Workload {
         p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
     // Confirm that fieldlengthgenerator returns a constant if data
     // integrity check requested.
-    if (dataintegrity
-        && !(p.getProperty(
-                 FIELD_LENGTH_DISTRIBUTION_PROPERTY,
-                 FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
+    if (dataintegrity && !(p.getProperty(
+          FIELD_LENGTH_DISTRIBUTION_PROPERTY,
+          FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
       System.err.println("Must have constant field size to check data integrity.");
       System.exit(-1);
     }
 
-    if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed")
-        == 0) {
+    if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
       orderedinserts = false;
     } else if (requestdistrib.compareTo("exponential") == 0) {
       double percentile = Double.parseDouble(p.getProperty(
@@ -429,31 +452,14 @@ public class CoreWorkload extends Workload {
     }
 
     keysequence = new CounterGenerator(insertstart);
-    operationchooser = new DiscreteGenerator();
-    if (readproportion > 0) {
-      operationchooser.addValue(readproportion, "READ");
-    }
-
-    if (updateproportion > 0) {
-      operationchooser.addValue(updateproportion, "UPDATE");
-    }
-
-    if (insertproportion > 0) {
-      operationchooser.addValue(insertproportion, "INSERT");
-    }
-
-    if (scanproportion > 0) {
-      operationchooser.addValue(scanproportion, "SCAN");
-    }
-
-    if (readmodifywriteproportion > 0) {
-      operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
-    }
+    operationchooser = createOperationGenerator(p);
 
     transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
     if (requestdistrib.compareTo("uniform") == 0) {
-      keychooser = new UniformIntegerGenerator(0, recordcount - 1);
-    } else if (requestdistrib.compareTo("zipfian") == 0) {
+      keychooser = new UniformIntegerGenerator(insertstart, insertstart + insertcount - 1);
+    } else if (requestdistrib.compareTo("sequential") == 0) {
+      keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
+    } else if (requestdistrib.compareTo("zipfian") == 0) {
       // it does this by generating a random "next key" in part by taking the modulus over the
       // number of keys.
       // If the number of keys changes, this would shift the modulus, and we don't want that to
@@ -463,11 +469,12 @@ public class CoreWorkload extends Workload {
       // plus the number of predicted keys as the total keyspace. then, if the generator picks a key
       // that hasn't been inserted yet, will just ignore it and pick another key. this way, the size of
       // the keyspace doesn't change from the perspective of the scrambled zipfian generator
-
+      final double insertproportion = Double.parseDouble(
+          p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
       int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
       int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
 
-      keychooser = new ScrambledZipfianGenerator(recordcount + expectednewkeys);
+      keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
     } else if (requestdistrib.compareTo("latest") == 0) {
       keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
     } else if (requestdistrib.equals("hotspot")) {
@@ -475,7 +482,8 @@ public class CoreWorkload extends Workload {
           Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
       double hotopnfraction =
           Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
-      keychooser = new HotspotIntegerGenerator(0, recordcount - 1, hotsetfraction, hotopnfraction);
+      keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
+          hotsetfraction, hotopnfraction);
     } else {
       throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
     }
@@ -493,7 +501,6 @@ public class CoreWorkload extends Workload {
 
     insertionRetryLimit = Integer.parseInt(p.getProperty(
         INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
-
     insertionRetryInterval = Integer.parseInt(p.getProperty(
         INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
   }
@@ -502,7 +509,13 @@ public class CoreWorkload extends Workload {
     if (!orderedinserts) {
       keynum = Utils.hash(keynum);
     }
-    return "user" + keynum;
+    String value = Long.toString(keynum);
+    int fill = zeropadding - value.length();
+    // Left pad the key number with zeros so that keys sort the same way as
+    // strings and as integers (e.g. zeropadding=8 turns 5 into "user00000005").
+    StringBuilder keyName = new StringBuilder("user");
+    for (int i = 0; i < fill; i++) {
+      keyName.append('0');
+    }
+    return keyName.append(value).toString();
   }
 
   /**
@@ -613,21 +626,20 @@ public class CoreWorkload extends Workload {
   @Override
   public boolean doTransaction(DB db, Object threadstate) {
     switch (operationchooser.nextString()) {
-    
-      case "READ":
-        doTransactionRead(db);
-        break;
-      case "UPDATE":
-        doTransactionUpdate(db);
-        break;
-      case "INSERT": 
-        doTransactionInsert(db);
-        break;
-      case "SCAN":
-        doTransactionScan(db);
-        break;
-      default:
-        doTransactionReadModifyWrite(db);
+    case "READ":
+      doTransactionRead(db);
+      break;
+    case "UPDATE":
+      doTransactionUpdate(db);
+      break;
+    case "INSERT":
+      doTransactionInsert(db);
+      break;
+    case "SCAN":
+      doTransactionScan(db);
+      break;
+    default:
+      doTransactionReadModifyWrite(db);
     } 
 
     return true;
@@ -801,4 +813,51 @@ public class CoreWorkload extends Workload {
       transactioninsertkeysequence.acknowledge(keynum);
     }
   }
+
+  /**
+   * Creates a weighted discrete generator of the database operations for a workload to perform.
+   * Weights/proportions are read from the properties list and defaults are used
+   * when values are not configured.
+   * Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
+   * @param p The properties list to pull weights from.
+   * @return A generator that can be used to determine the next operation to perform.
+   * @throws IllegalArgumentException if the properties object was null.
+   */
+  public static DiscreteGenerator createOperationGenerator(final Properties p) {
+    if (p == null) {
+      throw new IllegalArgumentException("Properties object cannot be null");
+    }
+    final double readproportion = Double.parseDouble(
+        p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
+    final double updateproportion = Double.parseDouble(
+        p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
+    final double insertproportion = Double.parseDouble(
+        p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
+    final double scanproportion = Double.parseDouble(
+        p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
+    final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
+        READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
+    
+    final DiscreteGenerator operationchooser = new DiscreteGenerator();
+    if (readproportion > 0) {
+      operationchooser.addValue(readproportion, "READ");
+    }
+
+    if (updateproportion > 0) {
+      operationchooser.addValue(updateproportion, "UPDATE");
+    }
+
+    if (insertproportion > 0) {
+      operationchooser.addValue(insertproportion, "INSERT");
+    }
+
+    if (scanproportion > 0) {
+      operationchooser.addValue(scanproportion, "SCAN");
+    }
+
+    if (readmodifywriteproportion > 0) {
+      operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
+    }
+    return operationchooser;
+  }
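+
+  // Illustrative sketch (comment only): given readproportion=0.95 and
+  // updateproportion=0.05 in the Properties, the returned generator's
+  // nextString() yields "READ" about 95% of the time and "UPDATE" about 5%.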
 }
diff --git a/core/src/test/java/com/yahoo/ycsb/TestUtils.java b/core/src/test/java/com/yahoo/ycsb/TestUtils.java
index cde5177656b728fc7d2d636b78e65ec808cf733f..a84eca86bbde9bea4a85f87770da0b034758f8a3 100644
--- a/core/src/test/java/com/yahoo/ycsb/TestUtils.java
+++ b/core/src/test/java/com/yahoo/ycsb/TestUtils.java
@@ -99,6 +99,23 @@ public class TestUtils {
     Utils.bytesToDouble(new byte[] { 0, 0, 0, 0, 0, 0, 0 });
   }
   
+  @Test
+  public void jvmUtils() throws Exception {
+    // This should ALWAYS return at least one thread.
+    assertTrue(Utils.getActiveThreadCount() > 0);
+    // This should always be greater than 0 or something is goofed up in the JVM.
+    assertTrue(Utils.getUsedMemoryBytes() > 0);
+    // Some operating systems may not implement this so we don't have a good
+    // test. Just make sure it doesn't throw an exception.
+    Utils.getSystemLoadAverage();
+    // This will probably be zero but should never be negative.
+    assertTrue(Utils.getGCTotalCollectionCount() >= 0);
+    // Could be zero similar to GC total collection count
+    assertTrue(Utils.getGCTotalTime() >= 0);
+    // Could be empty
+    assertTrue(Utils.getGCStatst().size() >= 0);
+  }
+   
   /**
    * Since this version of TestNG doesn't appear to have an assertArrayEquals,
    * this will compare the two to make sure they're the same. 
diff --git a/core/src/test/java/com/yahoo/ycsb/generator/TestIncrementingPrintableStringGenerator.java b/core/src/test/java/com/yahoo/ycsb/generator/TestIncrementingPrintableStringGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..eea3d507861563f21ced4c7a12ff168316539159
--- /dev/null
+++ b/core/src/test/java/com/yahoo/ycsb/generator/TestIncrementingPrintableStringGenerator.java
@@ -0,0 +1,130 @@
+/**
+ * Copyright (c) 2016 YCSB contributors. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+package com.yahoo.ycsb.generator;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertNull;
+import static org.testng.Assert.fail;
+
+import java.util.NoSuchElementException;
+
+import org.testng.annotations.Test;
+
+public class TestIncrementingPrintableStringGenerator {
+  private final static int[] ATOC = new int[] { 65, 66, 67 };
+  
+  @Test
+  public void rolloverOK() throws Exception {
+    final IncrementingPrintableStringGenerator gen = 
+        new IncrementingPrintableStringGenerator(2, ATOC);
+    
+    assertNull(gen.lastValue());
+    assertEquals(gen.nextValue(), "AA");
+    assertEquals(gen.lastValue(), "AA");
+    assertEquals(gen.nextValue(), "AB");
+    assertEquals(gen.lastValue(), "AB");
+    assertEquals(gen.nextValue(), "AC");
+    assertEquals(gen.lastValue(), "AC");
+    assertEquals(gen.nextValue(), "BA");
+    assertEquals(gen.lastValue(), "BA");
+    assertEquals(gen.nextValue(), "BB");
+    assertEquals(gen.lastValue(), "BB");
+    assertEquals(gen.nextValue(), "BC");
+    assertEquals(gen.lastValue(), "BC");
+    assertEquals(gen.nextValue(), "CA");
+    assertEquals(gen.lastValue(), "CA");
+    assertEquals(gen.nextValue(), "CB");
+    assertEquals(gen.lastValue(), "CB");
+    assertEquals(gen.nextValue(), "CC");
+    assertEquals(gen.lastValue(), "CC");
+    assertEquals(gen.nextValue(), "AA"); // <-- rollover
+    assertEquals(gen.lastValue(), "AA");
+  }
+  
+  @Test
+  public void rolloverOneCharacterOK() throws Exception {
+    // It would be silly to create a generator with one character.
+    final IncrementingPrintableStringGenerator gen = 
+        new IncrementingPrintableStringGenerator(2, new int[] { 65 });
+    for (int i = 0; i < 5; i++) {
+      assertEquals(gen.nextValue(), "AA");
+    }
+  }
+  
+  @Test
+  public void rolloverException() throws Exception {
+    final IncrementingPrintableStringGenerator gen = 
+        new IncrementingPrintableStringGenerator(2, ATOC);
+    gen.setThrowExceptionOnRollover(true);
+    
+    int i = 0;
+    try {
+      while(i < 11) {
+        ++i;
+        gen.nextValue();
+      }
+      fail("Expected NoSuchElementException");
+    } catch (NoSuchElementException e) {
+      assertEquals(i, 10);
+    }
+  }
+  
+  @Test
+  public void rolloverOneCharacterException() throws Exception {
+    // It would be silly to create a generator with one character.
+    final IncrementingPrintableStringGenerator gen = 
+        new IncrementingPrintableStringGenerator(2, new int[] { 65 });
+    gen.setThrowExceptionOnRollover(true);
+    
+    int i = 0;
+    try {
+      while(i < 3) {
+        ++i;
+        gen.nextValue();
+      }
+      fail("Expected NoSuchElementException");
+    } catch (NoSuchElementException e) {
+      assertEquals(i, 2);
+    }
+  }
+  
+  @Test
+  public void invalidLengths() throws Exception {
+    try {
+      new IncrementingPrintableStringGenerator(0, ATOC);
+      fail("Expected IllegalArgumentException");
+    } catch (IllegalArgumentException e) { }
+    
+    try {
+      new IncrementingPrintableStringGenerator(-42, ATOC);
+      fail("Expected IllegalArgumentException");
+    } catch (IllegalArgumentException e) { }
+  }
+  
+  @Test
+  public void invalidCharacterSets() throws Exception {
+    try {
+      new IncrementingPrintableStringGenerator(2, null);
+      fail("Expected IllegalArgumentException");
+    } catch (IllegalArgumentException e) { }
+    
+    try {
+      new IncrementingPrintableStringGenerator(2, new int[] {});
+      fail("Expected IllegalArgumentException");
+    } catch (IllegalArgumentException e) { }
+  }
+}
diff --git a/core/src/test/java/com/yahoo/ycsb/workloads/TestCoreWorkload.java b/core/src/test/java/com/yahoo/ycsb/workloads/TestCoreWorkload.java
new file mode 100644
index 0000000000000000000000000000000000000000..d52f29b248cd069596ae1dd5bf9f8473e1fcaa64
--- /dev/null
+++ b/core/src/test/java/com/yahoo/ycsb/workloads/TestCoreWorkload.java
@@ -0,0 +1,70 @@
+/**
+ * Copyright (c) 2016 YCSB contributors. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+package com.yahoo.ycsb.workloads;
+
+import static org.testng.Assert.assertTrue;
+
+import java.util.Properties;
+
+import org.testng.annotations.Test;
+
+import com.yahoo.ycsb.generator.DiscreteGenerator;
+
+public class TestCoreWorkload {
+
+  @Test
+  public void createOperationChooser() {
+    final Properties p = new Properties();
+    p.setProperty(CoreWorkload.READ_PROPORTION_PROPERTY, "0.20");
+    p.setProperty(CoreWorkload.UPDATE_PROPORTION_PROPERTY, "0.20");
+    p.setProperty(CoreWorkload.INSERT_PROPORTION_PROPERTY, "0.20");
+    p.setProperty(CoreWorkload.SCAN_PROPORTION_PROPERTY, "0.20");
+    p.setProperty(CoreWorkload.READMODIFYWRITE_PROPORTION_PROPERTY, "0.20");
+    final DiscreteGenerator generator = CoreWorkload.createOperationGenerator(p);
+    final int[] counts = new int[5];
+    
+    for (int i = 0; i < 100; ++i) {
+      switch (generator.nextString()) {
+      case "READ":
+        ++counts[0];
+        break;
+      case "UPDATE":
+        ++counts[1];
+        break;
+      case "INSERT": 
+        ++counts[2];
+        break;
+      case "SCAN":
+        ++counts[3];
+        break;
+      default:
+        ++counts[4];
+      } 
+    }
+    
+    for (int i : counts) {
+      // The distribution won't be perfectly even, but in a hundred draws, if we
+      // don't see each operation at least a couple of times then the generator
+      // is really broken.
+      assertTrue(i > 1);
+    }
+  }
+  
+  @Test (expectedExceptions = IllegalArgumentException.class)
+  public void createOperationChooserNullProperties() {
+    CoreWorkload.createOperationGenerator(null);
+  }
+}
\ No newline at end of file
diff --git a/couchbase/pom.xml b/couchbase/pom.xml
index 5f4780aa96eb68689befd4ed028d454470cab4b4..6f3157e97e1d2805f2790a7b4d757cc2000d80ff 100644
--- a/couchbase/pom.xml
+++ b/couchbase/pom.xml
@@ -22,7 +22,7 @@ LICENSE file.
     <parent>
         <groupId>com.yahoo.ycsb</groupId>
         <artifactId>binding-parent</artifactId>
-        <version>0.9.0-SNAPSHOT</version>
+        <version>0.10.0-SNAPSHOT</version>
         <relativePath>../binding-parent</relativePath>
     </parent>
 
diff --git a/couchbase2/README.md b/couchbase2/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..786060da43941099e8f4f90e70f967fa6b86c273
--- /dev/null
+++ b/couchbase2/README.md
@@ -0,0 +1,137 @@
+<!--
+Copyright (c) 2015 - 2016 YCSB contributors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file except in compliance with the License. You
+may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License. See accompanying
+LICENSE file.
+-->
+
+# Couchbase (SDK 2.x) Driver for YCSB
+This driver is a binding for the YCSB facilities to operate against a Couchbase Server cluster. It uses the official
+Couchbase Java SDK (version 2.x) and provides a rich set of configuration options, including support for the N1QL
+query language.
+
+## Quickstart
+
+### 1. Start Couchbase Server
+You need to start a single node or a cluster to point the client at. Please see [couchbase.com](http://couchbase.com)
+for more details and instructions.
+
+### 2. Set up YCSB
+You can either download the release zip and run it, or just clone from master.
+
+```
+git clone git://github.com/brianfrankcooper/YCSB.git
+cd YCSB
+mvn clean package
+```
+
+### 3. Run the Workload
+Before you can actually run the workload, you need to "load" the data first.
+
+```
+bin/ycsb load couchbase2 -s -P workloads/workloada
+```
+
+Then, you can run the workload:
+
+```
+bin/ycsb run couchbase2 -s -P workloads/workloada
+```
+
+Please see the general instructions in the `doc` folder if you are not sure how it all works. You can apply a property
+(as seen in the next section) like this:
+
+```
+bin/ycsb run couchbase2 -s -P workloads/workloada -p couchbase.epoll=true
+```
+
+## N1QL Index Setup
+In general, every time N1QL is used (either implicitly through using `workloade` or through setting `kv=false`) some
+kind of index must be present to make it work. Depending on the workload and data size, choosing the right index is
+crucial at runtime in order to get the best performance. If in doubt, please ask at the
+[forums](http://forums.couchbase.com) or get in touch with our team at Couchbase.
+
+For `workloade` and the default `readallfields=true` we recommend creating the following index, ideally combined with
+the "Memory Optimized Index" setting on the bucket if you are using Couchbase Server 4.5 or later.
+
+```
+CREATE INDEX wle_idx ON `bucketname`(meta().id);
+```
+
+For other workloads, different index setups might be even more performant.
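+
+For example (an illustrative index only; the field names depend on your workload configuration), a read-heavy
+workload that always fetches `field0` might benefit from a covering index such as:
+
+```
+CREATE INDEX wl_covering_idx ON `bucketname`(meta().id, field0);
+```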
+
+## Performance Considerations
+As with any benchmark, there are a lot of knobs to tune in order to get great or (if you are reading
+this and trying to write a competitor benchmark ;-)) bad performance.
+
+The first setting you should consider, if you are running on 64-bit Linux, is `-p couchbase.epoll=true`. This turns on
+the Epoll IO mechanism in the underlying Netty library, which provides better performance than the NIO default since it
+has less synchronization to do. This only works on Linux, but you are benchmarking on the OS you are
+deploying to, right?
+
+The second option, `boost`, sounds more magical than it actually is. By default this driver trades CPU for throughput,
+but this can be disabled by setting `-p couchbase.boost=0`. The value defaults to 3, which is the number of event loops
+run in the IO layer. 3 is a reasonable default, but you should set it to the number of **physical** cores you have available
+on the machine if you only plan to run one YCSB instance. Make sure (using profiling) to max out your cores, but don't
+overdo it.
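+
+For example, on a 64-bit Linux machine with four physical cores, an illustrative invocation combining both settings
+would be:
+
+```
+bin/ycsb run couchbase2 -s -P workloads/workloada -p couchbase.epoll=true -p couchbase.boost=4
+```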
+
+## Sync vs Async
+Since YCSB is synchronous, by default the code always waits for each operation to complete. In some cases it can be
+useful to just "drive load" and disable the waiting. Note that when the `-p couchbase.syncMutationResponse=false` option
+is used, the results measured by YCSB can basically be thrown away, but it is still helpful sometimes to speed up
+load phases :)
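+
+For example, an illustrative load-phase invocation (the latencies reported for such a run should be discarded):
+
+```
+bin/ycsb load couchbase2 -s -P workloads/workloada -p couchbase.syncMutationResponse=false
+```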
+
+## Debugging Latency
+The Couchbase Java SDK has the ability to collect and dump different kinds of metrics which allow you to analyze
+performance during benchmarking and production. By default this option is disabled in the benchmark, but by setting
+`couchbase.networkMetricsInterval` and/or `couchbase.runtimeMetricsInterval` to something greater than 0 it will
+output the information as JSON into the configured logger. The number provided is the interval in seconds. If you are
+unsure what interval to pick, start with 10 or 30 seconds, depending on your runtime length.
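+
+For example, an illustrative run that emits both metric types every 30 seconds:
+
+```
+bin/ycsb run couchbase2 -s -P workloads/workloada -p couchbase.networkMetricsInterval=30 -p couchbase.runtimeMetricsInterval=30
+```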
+
+This is what such logs look like:
+
+```
+INFO: {"heap.used":{"init":268435456,"used":36500912,"committed":232259584,"max":3817865216},"gc.ps marksweep.collectionTime":0,"gc.ps scavenge.collectionTime":54,"gc.ps scavenge.collectionCount":17,"thread.count":26,"offHeap.used":{"init":2555904,"used":30865944,"committed":31719424,"max":-1},"gc.ps marksweep.collectionCount":0,"heap.pendingFinalize":0,"thread.peakCount":26,"event":{"name":"RuntimeMetrics","type":"METRIC"},"thread.startedCount":28}
+INFO: {"localhost/127.0.0.1:11210":{"BINARY":{"ReplaceRequest":{"SUCCESS":{"metrics":{"percentiles":{"50.0":102,"90.0":136,"95.0":155,"99.0":244,"99.9":428},"min":55,"max":1564,"count":35787,"timeUnit":"MICROSECONDS"}}},"GetRequest":{"SUCCESS":{"metrics":{"percentiles":{"50.0":74,"90.0":98,"95.0":110,"99.0":158,"99.9":358},"min":34,"max":2310,"count":35604,"timeUnit":"MICROSECONDS"}}},"GetBucketConfigRequest":{"SUCCESS":{"metrics":{"percentiles":{"50.0":462,"90.0":462,"95.0":462,"99.0":462,"99.9":462},"min":460,"max":462,"count":1,"timeUnit":"MICROSECONDS"}}}}},"event":{"name":"NetworkLatencyMetrics","type":"METRIC"}}
+```
+
+It is recommended to either feed the output into a program which can analyze and visualize JSON, or just dump it into a
+JSON pretty printer and look at it manually. Since the output can be changed (only by changing the code at the moment),
+you can even configure it to put those messages into another Couchbase bucket and then analyze them through N1QL! You can learn
+more about this in general [in the official docs](http://developer.couchbase.com/documentation/server/4.0/sdks/java-2.2/event-bus-metrics.html).
+
+
+## Configuration Options
+Since no setup is the same and the goal of YCSB is to deliver realistic benchmarks, here are some setups that you can
+tune. Note that if you need more flexibility (let's say a custom transcoder), you still need to extend this driver and
+implement the facilities on your own.
+
+You can set the following properties (with the default settings applied):
+
+ - couchbase.host=127.0.0.1: The hostname of one server in the cluster.
+ - couchbase.bucket=default: The bucket name to use.
+ - couchbase.password=: The password of the bucket.
+ - couchbase.syncMutationResponse=true: If mutations should wait for the response to complete.
+ - couchbase.persistTo=0: The persistence durability requirement.
+ - couchbase.replicateTo=0: The replication durability requirement.
+ - couchbase.upsert=false: Use upsert instead of insert or replace.
+ - couchbase.adhoc=false: If set to true, prepared statements are not used.
+ - couchbase.kv=true: If set to false, mutation operations will also be performed through N1QL.
+ - couchbase.maxParallelism=1: The server parallelism for all N1QL queries.
+ - couchbase.kvEndpoints=1: The number of KV sockets to open per server.
+ - couchbase.queryEndpoints=1: The number of N1QL Query sockets to open per server.
+ - couchbase.epoll=false: If Epoll instead of NIO should be used (only available on Linux).
+ - couchbase.boost=3: If > 0 trades CPU for higher throughput. N is the number of event loops, ideally
+   set to the number of physical cores. Setting higher than that will likely degrade performance.
+ - couchbase.networkMetricsInterval=0: The interval in seconds when latency metrics will be logged.
+ - couchbase.runtimeMetricsInterval=0: The interval in seconds when runtime metrics will be logged.
\ No newline at end of file
diff --git a/couchbase2/pom.xml b/couchbase2/pom.xml
new file mode 100644
index 0000000000000000000000000000000000000000..a73ad4cbc2e67fafe036ed9aa03072b2e7ab0ac1
--- /dev/null
+++ b/couchbase2/pom.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2015 YCSB contributors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file except in compliance with the License. You
+may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License. See accompanying
+LICENSE file.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>com.yahoo.ycsb</groupId>
+        <artifactId>binding-parent</artifactId>
+        <version>0.10.0-SNAPSHOT</version>
+        <relativePath>../binding-parent</relativePath>
+    </parent>
+
+    <artifactId>couchbase2-binding</artifactId>
+    <name>Couchbase Java SDK 2.x Binding</name>
+    <packaging>jar</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.couchbase.client</groupId>
+            <artifactId>java-client</artifactId>
+            <version>${couchbase2.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.yahoo.ycsb</groupId>
+            <artifactId>core</artifactId>
+            <version>${project.version}</version>
+            <scope>provided</scope>
+        </dependency>
+    </dependencies>
+
+</project>
diff --git a/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java b/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java
new file mode 100644
index 0000000000000000000000000000000000000000..3d0bc0398c76931539859af551c42b6379a664cb
--- /dev/null
+++ b/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java
@@ -0,0 +1,939 @@
+/**
+ * Copyright (c) 2016 Yahoo! Inc. All rights reserved.
+ * <p>
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+
+package com.yahoo.ycsb.db.couchbase2;
+
+import com.couchbase.client.core.env.DefaultCoreEnvironment;
+import com.couchbase.client.core.env.resources.IoPoolShutdownHook;
+import com.couchbase.client.core.logging.CouchbaseLogger;
+import com.couchbase.client.core.logging.CouchbaseLoggerFactory;
+import com.couchbase.client.core.metrics.DefaultLatencyMetricsCollectorConfig;
+import com.couchbase.client.core.metrics.DefaultMetricsCollectorConfig;
+import com.couchbase.client.core.metrics.LatencyMetricsCollectorConfig;
+import com.couchbase.client.core.metrics.MetricsCollectorConfig;
+import com.couchbase.client.deps.com.fasterxml.jackson.core.JsonFactory;
+import com.couchbase.client.deps.com.fasterxml.jackson.core.JsonGenerator;
+import com.couchbase.client.deps.com.fasterxml.jackson.databind.JsonNode;
+import com.couchbase.client.deps.com.fasterxml.jackson.databind.node.ObjectNode;
+import com.couchbase.client.deps.io.netty.channel.DefaultSelectStrategyFactory;
+import com.couchbase.client.deps.io.netty.channel.EventLoopGroup;
+import com.couchbase.client.deps.io.netty.channel.SelectStrategy;
+import com.couchbase.client.deps.io.netty.channel.SelectStrategyFactory;
+import com.couchbase.client.deps.io.netty.channel.epoll.EpollEventLoopGroup;
+import com.couchbase.client.deps.io.netty.channel.nio.NioEventLoopGroup;
+import com.couchbase.client.deps.io.netty.util.IntSupplier;
+import com.couchbase.client.deps.io.netty.util.concurrent.DefaultThreadFactory;
+import com.couchbase.client.java.Bucket;
+import com.couchbase.client.java.Cluster;
+import com.couchbase.client.java.CouchbaseCluster;
+import com.couchbase.client.java.PersistTo;
+import com.couchbase.client.java.ReplicateTo;
+import com.couchbase.client.java.document.Document;
+import com.couchbase.client.java.document.RawJsonDocument;
+import com.couchbase.client.java.document.json.JsonArray;
+import com.couchbase.client.java.document.json.JsonObject;
+import com.couchbase.client.java.env.CouchbaseEnvironment;
+import com.couchbase.client.java.env.DefaultCouchbaseEnvironment;
+import com.couchbase.client.java.error.TemporaryFailureException;
+import com.couchbase.client.java.query.*;
+import com.couchbase.client.java.transcoder.JacksonTransformers;
+import com.couchbase.client.java.util.Blocking;
+import com.yahoo.ycsb.ByteIterator;
+import com.yahoo.ycsb.DB;
+import com.yahoo.ycsb.DBException;
+import com.yahoo.ycsb.Status;
+import com.yahoo.ycsb.StringByteIterator;
+import rx.Observable;
+import rx.Subscriber;
+import rx.functions.Action1;
+import rx.functions.Func1;
+
+import java.io.StringWriter;
+import java.io.Writer;
+import java.nio.channels.spi.SelectorProvider;
+import java.util.*;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.LockSupport;
+
+/**
+ * A class that wraps the 2.x Couchbase SDK to be used with YCSB.
+ *
+ * <p> The following options can be passed when using this database client to override the defaults.
+ *
+ * <ul>
+ * <li><b>couchbase.host=127.0.0.1</b> The hostname of one server in the cluster.</li>
+ * <li><b>couchbase.bucket=default</b> The bucket name to use.</li>
+ * <li><b>couchbase.password=</b> The password of the bucket.</li>
+ * <li><b>couchbase.syncMutationResponse=true</b> If mutations should wait for the response to complete.</li>
+ * <li><b>couchbase.persistTo=0</b> The persistence durability requirement.</li>
+ * <li><b>couchbase.replicateTo=0</b> The replication durability requirement.</li>
+ * <li><b>couchbase.upsert=false</b> Use upsert instead of insert or replace.</li>
+ * <li><b>couchbase.adhoc=false</b> If set to true, prepared statements are not used.</li>
+ * <li><b>couchbase.kv=true</b> If set to false, mutation operations will also be performed through N1QL.</li>
+ * <li><b>couchbase.maxParallelism=1</b> The server parallelism for all N1QL queries.</li>
+ * <li><b>couchbase.kvEndpoints=1</b> The number of KV sockets to open per server.</li>
+ * <li><b>couchbase.queryEndpoints=1</b> The number of N1QL Query sockets to open per server.</li>
+ * <li><b>couchbase.epoll=false</b> If Epoll instead of NIO should be used (only available on Linux).</li>
+ * <li><b>couchbase.boost=3</b> If > 0 trades CPU for higher throughput. N is the number of event loops, ideally
+ *      set to the number of physical cores. Setting higher than that will likely degrade performance.</li>
+ * <li><b>couchbase.networkMetricsInterval=0</b> The interval in seconds when latency metrics will be logged.</li>
+ * <li><b>couchbase.runtimeMetricsInterval=0</b> The interval in seconds when runtime metrics will be logged.</li>
+ * </ul>
+ */
+public class Couchbase2Client extends DB {
+
+  static {
+    // No need to send the full encoded_plan for this benchmark workload, less network overhead!
+    System.setProperty("com.couchbase.query.encodedPlanEnabled", "false");
+  }
+
+  private static final String SEPARATOR = ":";
+  private static final CouchbaseLogger LOGGER = CouchbaseLoggerFactory.getInstance(Couchbase2Client.class);
+  private static final Object INIT_COORDINATOR = new Object();
+
+  private static volatile CouchbaseEnvironment env = null;
+
+  private Cluster cluster;
+  private Bucket bucket;
+  private String bucketName;
+  private boolean upsert;
+  private PersistTo persistTo;
+  private ReplicateTo replicateTo;
+  private boolean syncMutResponse;
+  private boolean epoll;
+  private long kvTimeout;
+  private boolean adhoc;
+  private boolean kv;
+  private int maxParallelism;
+  private String host;
+  private int kvEndpoints;
+  private int queryEndpoints;
+  private int boost;
+  private int networkMetricsInterval;
+  private int runtimeMetricsInterval;
+  private String scanAllQuery;
+
+  @Override
+  public void init() throws DBException {
+    Properties props = getProperties();
+
+    host = props.getProperty("couchbase.host", "127.0.0.1");
+    bucketName = props.getProperty("couchbase.bucket", "default");
+    String bucketPassword = props.getProperty("couchbase.password", "");
+
+    upsert = props.getProperty("couchbase.upsert", "false").equals("true");
+    persistTo = parsePersistTo(props.getProperty("couchbase.persistTo", "0"));
+    replicateTo = parseReplicateTo(props.getProperty("couchbase.replicateTo", "0"));
+    syncMutResponse = props.getProperty("couchbase.syncMutationResponse", "true").equals("true");
+    adhoc = props.getProperty("couchbase.adhoc", "false").equals("true");
+    kv = props.getProperty("couchbase.kv", "true").equals("true");
+    maxParallelism = Integer.parseInt(props.getProperty("couchbase.maxParallelism", "1"));
+    kvEndpoints = Integer.parseInt(props.getProperty("couchbase.kvEndpoints", "1"));
+    queryEndpoints = Integer.parseInt(props.getProperty("couchbase.queryEndpoints", "1"));
+    epoll = props.getProperty("couchbase.epoll", "false").equals("true");
+    boost = Integer.parseInt(props.getProperty("couchbase.boost", "3"));
+    networkMetricsInterval = Integer.parseInt(props.getProperty("couchbase.networkMetricsInterval", "0"));
+    runtimeMetricsInterval = Integer.parseInt(props.getProperty("couchbase.runtimeMetricsInterval", "0"));
+    scanAllQuery = "SELECT meta().id as id FROM `" + bucketName + "` WHERE meta().id >= '$1' LIMIT $2";
+
+    try {
+      synchronized (INIT_COORDINATOR) {
+        if (env == null) {
+
+          LatencyMetricsCollectorConfig latencyConfig = networkMetricsInterval <= 0
+              ? DefaultLatencyMetricsCollectorConfig.disabled()
+              : DefaultLatencyMetricsCollectorConfig
+                  .builder()
+                  .emitFrequency(networkMetricsInterval)
+                  .emitFrequencyUnit(TimeUnit.SECONDS)
+                  .build();
+
+          MetricsCollectorConfig runtimeConfig = runtimeMetricsInterval <= 0
+              ? DefaultMetricsCollectorConfig.disabled()
+              : DefaultMetricsCollectorConfig.create(runtimeMetricsInterval, TimeUnit.SECONDS);
+
+          DefaultCouchbaseEnvironment.Builder builder = DefaultCouchbaseEnvironment
+              .builder()
+              .queryEndpoints(queryEndpoints)
+              .callbacksOnIoPool(true)
+              .runtimeMetricsCollectorConfig(runtimeConfig)
+              .networkLatencyMetricsCollectorConfig(latencyConfig)
+              .socketConnectTimeout(10000) // 10 secs socket connect timeout
+              .connectTimeout(30000) // 30 secs overall bucket open timeout
+              .kvTimeout(10000) // 10 instead of 2.5s for KV ops
+              .kvEndpoints(kvEndpoints);
+
+          // Tune boosting and epoll based on settings
+          SelectStrategyFactory factory = boost > 0 ?
+              new BackoffSelectStrategyFactory() : DefaultSelectStrategyFactory.INSTANCE;
+
+          int poolSize = boost > 0 ? boost : Integer.parseInt(
+              System.getProperty("com.couchbase.ioPoolSize", Integer.toString(DefaultCoreEnvironment.IO_POOL_SIZE))
+          );
+          ThreadFactory threadFactory = new DefaultThreadFactory("cb-io", true);
+
+          EventLoopGroup group = epoll ? new EpollEventLoopGroup(poolSize, threadFactory, factory)
+              : new NioEventLoopGroup(poolSize, threadFactory, SelectorProvider.provider(), factory);
+          builder.ioPool(group, new IoPoolShutdownHook(group));
+
+          env = builder.build();
+          logParams();
+        }
+      }
+
+      cluster = CouchbaseCluster.create(env, host);
+      bucket = cluster.openBucket(bucketName, bucketPassword);
+      kvTimeout = env.kvTimeout();
+    } catch (Exception ex) {
+      throw new DBException("Could not connect to Couchbase Bucket.", ex);
+    }
+
+    if (!kv && !syncMutResponse) {
+      throw new DBException("Not waiting for N1QL responses on mutations not yet implemented.");
+    }
+  }
+
+  /**
+   * Helper method to log the CLI params so that on the command line debugging is easier.
+   */
+  private void logParams() {
+    StringBuilder sb = new StringBuilder();
+
+    sb.append("host=").append(host);
+    sb.append(", bucket=").append(bucketName);
+    sb.append(", upsert=").append(upsert);
+    sb.append(", persistTo=").append(persistTo);
+    sb.append(", replicateTo=").append(replicateTo);
+    sb.append(", syncMutResponse=").append(syncMutResponse);
+    sb.append(", adhoc=").append(adhoc);
+    sb.append(", kv=").append(kv);
+    sb.append(", maxParallelism=").append(maxParallelism);
+    sb.append(", queryEndpoints=").append(queryEndpoints);
+    sb.append(", kvEndpoints=").append(kvEndpoints);
+    sb.append(", queryEndpoints=").append(queryEndpoints);
+    sb.append(", epoll=").append(epoll);
+    sb.append(", boost=").append(boost);
+    sb.append(", networkMetricsInterval=").append(networkMetricsInterval);
+    sb.append(", runtimeMetricsInterval=").append(runtimeMetricsInterval);
+
+    LOGGER.info("===> Using Params: " + sb.toString());
+  }
+
+  @Override
+  public Status read(final String table, final String key, Set<String> fields,
+      final HashMap<String, ByteIterator> result) {
+    try {
+      String docId = formatId(table, key);
+      if (kv) {
+        return readKv(docId, fields, result);
+      } else {
+        return readN1ql(docId, fields, result);
+      }
+    } catch (Exception ex) {
+      ex.printStackTrace();
+      return Status.ERROR;
+    }
+  }
+
+  /**
+   * Performs the {@link #read(String, String, Set, HashMap)} operation via Key/Value ("get").
+   *
+   * @param docId the document ID
+   * @param fields the fields to be loaded
+   * @param result the result map into which the document is decoded
+   * @return The result of the operation.
+   */
+  private Status readKv(final String docId, final Set<String> fields, final HashMap<String, ByteIterator> result)
+    throws Exception {
+    RawJsonDocument loaded = bucket.get(docId, RawJsonDocument.class);
+    if (loaded == null) {
+      return Status.NOT_FOUND;
+    }
+    decode(loaded.content(), fields, result);
+    return Status.OK;
+  }
+
+  /**
+   * Performs the {@link #read(String, String, Set, HashMap)} operation via N1QL ("SELECT").
+   *
+   * To use this option, the "-p couchbase.kv=false" property must be set.
+   *
+   * @param docId the document ID
+   * @param fields the fields to be loaded
+   * @param result the result map into which the document is decoded
+   * @return The result of the operation.
+   */
+  private Status readN1ql(final String docId, Set<String> fields, final HashMap<String, ByteIterator> result)
+    throws Exception {
+    String readQuery = "SELECT " + joinFields(fields) + " FROM `" + bucketName + "` USE KEYS [$1]";
+    N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
+        readQuery,
+        JsonArray.from(docId),
+        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
+    ));
+
+    if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
+      throw new DBException("Error while parsing N1QL Result. Query: " + readQuery
+        + ", Errors: " + queryResult.errors());
+    }
+
+    N1qlQueryRow row;
+    try {
+      row = queryResult.rows().next();
+    } catch (NoSuchElementException ex) {
+      return Status.NOT_FOUND;
+    }
+
+    JsonObject content = row.value();
+    if (fields == null || fields.isEmpty()) {
+      content = content.getObject(bucketName); // n1ql result set scoped under *.bucketName
+      fields = content.getNames();
+    }
+
+    for (String field : fields) {
+      Object value = content.get(field);
+      result.put(field, new StringByteIterator(value != null ? value.toString() : ""));
+    }
+
+    return Status.OK;
+  }
+
+  @Override
+  public Status update(final String table, final String key, final HashMap<String, ByteIterator> values) {
+    if (upsert) {
+      return upsert(table, key, values);
+    }
+
+    try {
+      String docId = formatId(table, key);
+      if (kv) {
+        return updateKv(docId, values);
+      } else {
+        return updateN1ql(docId, values);
+      }
+    } catch (Exception ex) {
+      ex.printStackTrace();
+      return Status.ERROR;
+    }
+  }
+
+  /**
+   * Performs the {@link #update(String, String, HashMap)} operation via Key/Value ("replace").
+   *
+   * @param docId the document ID
+   * @param values the values to update the document with.
+   * @return The result of the operation.
+   */
+  private Status updateKv(final String docId, final HashMap<String, ByteIterator> values) {
+    waitForMutationResponse(bucket.async().replace(
+        RawJsonDocument.create(docId, encode(values)),
+        persistTo,
+        replicateTo
+    ));
+    return Status.OK;
+  }
+
+  /**
+   * Performs the {@link #update(String, String, HashMap)} operation via N1QL ("UPDATE").
+   *
+   * To use this option, the "-p couchbase.kv=false" property must be set.
+   *
+   * @param docId the document ID
+   * @param values the values to update the document with.
+   * @return The result of the operation.
+   */
+  private Status updateN1ql(final String docId, final HashMap<String, ByteIterator> values)
+    throws Exception {
+    String fields = encodeN1qlFields(values);
+    String updateQuery = "UPDATE `" + bucketName + "` USE KEYS [$1] SET " + fields;
+
+    N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
+        updateQuery,
+        JsonArray.from(docId),
+        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
+    ));
+
+    if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
+      throw new DBException("Error while parsing N1QL Result. Query: " + updateQuery
+        + ", Errors: " + queryResult.errors());
+    }
+    return Status.OK;
+  }
+
+  @Override
+  public Status insert(final String table, final String key, final HashMap<String, ByteIterator> values) {
+    if (upsert) {
+      return upsert(table, key, values);
+    }
+
+    try {
+      String docId = formatId(table, key);
+      if (kv) {
+        return insertKv(docId, values);
+      } else {
+        return insertN1ql(docId, values);
+      }
+    } catch (Exception ex) {
+      ex.printStackTrace();
+      return Status.ERROR;
+    }
+  }
+
+  /**
+   * Performs the {@link #insert(String, String, HashMap)} operation via Key/Value ("INSERT").
+   *
+   * Note that during the "load" phase it makes sense to retry TMPFAILs (so that even if the server is
+   * temporarily overloaded, the ops will eventually succeed). The current code retries TMPFAILs for
+   * a maximum of one minute and then bubbles up the error.
+   *
+   * @param docId the document ID
+   * @param values the values to update the document with.
+   * @return The result of the operation.
+   */
+  private Status insertKv(final String docId, final HashMap<String, ByteIterator> values) {
+    int tries = 60; // roughly 60 seconds with the 1 second sleep, not 100% accurate.
+
+    for (int i = 0; i < tries; i++) {
+      try {
+        waitForMutationResponse(bucket.async().insert(
+            RawJsonDocument.create(docId, encode(values)),
+            persistTo,
+            replicateTo
+        ));
+        return Status.OK;
+      } catch (TemporaryFailureException ex) {
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+          Thread.currentThread().interrupt(); // restore the interrupt flag
+          throw new RuntimeException("Interrupted while sleeping on TMPFAIL backoff.", e);
+        }
+      }
+    }
+
+    throw new RuntimeException("Still receiving TMPFAIL from the server after trying " + tries + " times. " +
+      "Check your server.");
+  }
+
+  /**
+   * Performs the {@link #insert(String, String, HashMap)} operation via N1QL ("INSERT").
+   *
+   * To use this option, the "-p couchbase.kv=false" property must be set.
+   *
+   * @param docId the document ID
+   * @param values the values to update the document with.
+   * @return The result of the operation.
+   */
+  private Status insertN1ql(final String docId, final HashMap<String, ByteIterator> values)
+    throws Exception {
+    String insertQuery = "INSERT INTO `" + bucketName + "`(KEY,VALUE) VALUES ($1,$2)";
+
+    N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
+        insertQuery,
+        JsonArray.from(docId, valuesToJsonObject(values)),
+        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
+    ));
+
+    if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
+      throw new DBException("Error while parsing N1QL Result. Query: " + insertQuery
+        + ", Errors: " + queryResult.errors());
+    }
+    return Status.OK;
+  }
+
+  /**
+   * Performs an upsert instead of insert or update using either Key/Value or N1QL.
+   *
+   * To use this option, the "-p couchbase.upsert=true" property must be set.
+   *
+   * @param table The name of the table
+   * @param key The record key of the record to insert.
+   * @param values A HashMap of field/value pairs to insert in the record
+   * @return The result of the operation.
+   */
+  private Status upsert(final String table, final String key, final HashMap<String, ByteIterator> values) {
+    try {
+      String docId = formatId(table, key);
+      if (kv) {
+        return upsertKv(docId, values);
+      } else {
+        return upsertN1ql(docId, values);
+      }
+    } catch (Exception ex) {
+      ex.printStackTrace();
+      return Status.ERROR;
+    }
+  }
+
+  /**
+   * Performs the {@link #upsert(String, String, HashMap)} operation via Key/Value ("upsert").
+   *
+   * To use this option, the "-p couchbase.upsert=true" property must be set.
+   *
+   * @param docId the document ID
+   * @param values the values to update the document with.
+   * @return The result of the operation.
+   */
+  private Status upsertKv(final String docId, final HashMap<String, ByteIterator> values) {
+    waitForMutationResponse(bucket.async().upsert(
+        RawJsonDocument.create(docId, encode(values)),
+        persistTo,
+        replicateTo
+    ));
+    return Status.OK;
+  }
+
+  /**
+   * Performs the {@link #upsert(String, String, HashMap)} operation via N1QL ("UPSERT").
+   *
+   * To use this option, the "-p couchbase.upsert=true -p couchbase.kv=false" properties must be set.
+   *
+   * @param docId the document ID
+   * @param values the values to update the document with.
+   * @return The result of the operation.
+   */
+  private Status upsertN1ql(final String docId, final HashMap<String, ByteIterator> values)
+    throws Exception {
+    String upsertQuery = "UPSERT INTO `" + bucketName + "`(KEY,VALUE) VALUES ($1,$2)";
+
+    N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
+        upsertQuery,
+        JsonArray.from(docId, valuesToJsonObject(values)),
+        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
+    ));
+
+    if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
+      throw new DBException("Error while parsing N1QL Result. Query: " + upsertQuery
+        + ", Errors: " + queryResult.errors());
+    }
+    return Status.OK;
+  }
+
+  @Override
+  public Status delete(final String table, final String key) {
+    try {
+      String docId = formatId(table, key);
+      if (kv) {
+        return deleteKv(docId);
+      } else {
+        return deleteN1ql(docId);
+      }
+    } catch (Exception ex) {
+      ex.printStackTrace();
+      return Status.ERROR;
+    }
+  }
+
+  /**
+   * Performs the {@link #delete(String, String)} operation via Key/Value ("remove").
+   *
+   * @param docId the document ID.
+   * @return The result of the operation.
+   */
+  private Status deleteKv(final String docId) {
+    waitForMutationResponse(bucket.async().remove(
+        docId,
+        persistTo,
+        replicateTo
+    ));
+    return Status.OK;
+  }
+
+  /**
+   * Performs the {@link #delete(String, String)} operation via N1QL ("DELETE").
+   *
+   * To use this option, the "-p couchbase.kv=false" property must be set.
+   *
+   * @param docId the document ID.
+   * @return The result of the operation.
+   */
+  private Status deleteN1ql(final String docId) throws Exception {
+    String deleteQuery = "DELETE FROM `" + bucketName + "` USE KEYS [$1]";
+    N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
+        deleteQuery,
+        JsonArray.from(docId),
+        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
+    ));
+
+    if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
+      throw new DBException("Error while parsing N1QL Result. Query: " + deleteQuery
+        + ", Errors: " + queryResult.errors());
+    }
+    return Status.OK;
+  }
+
+  @Override
+  public Status scan(final String table, final String startkey, final int recordcount, final Set<String> fields,
+      final Vector<HashMap<String, ByteIterator>> result) {
+    try {
+      if (fields == null || fields.isEmpty()) {
+        return scanAllFields(table, startkey, recordcount, result);
+      } else {
+        return scanSpecificFields(table, startkey, recordcount, fields, result);
+      }
+    } catch (Exception ex) {
+      ex.printStackTrace();
+      return Status.ERROR;
+    }
+  }
+
+  /**
+   * Performs the {@link #scan(String, String, int, Set, Vector)} operation, optimized for all fields.
+   *
+   * Since the full document bodies need to be loaded anyway, it makes sense to just grab the document IDs
+   * from N1QL and then perform the bulk loading via KV for better performance. This is a common pattern with
+   * Couchbase and shows the benefits of using both N1QL and KV together.
+   *
+   * @param table The name of the table
+   * @param startkey The record key of the first record to read.
+   * @param recordcount The number of records to read
+   * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
+   * @return The result of the operation.
+   */
+  private Status scanAllFields(final String table, final String startkey, final int recordcount,
+      final Vector<HashMap<String, ByteIterator>> result) {
+    final List<HashMap<String, ByteIterator>> data = new ArrayList<HashMap<String, ByteIterator>>(recordcount);
+
+    bucket.async()
+        .query(N1qlQuery.parameterized(
+          scanAllQuery,
+          JsonArray.from(formatId(table, startkey), recordcount),
+          N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
+        ))
+        .doOnNext(new Action1<AsyncN1qlQueryResult>() {
+          @Override
+          public void call(AsyncN1qlQueryResult result) {
+            if (!result.parseSuccess()) {
+              throw new RuntimeException("Error while parsing N1QL Result. Query: " + scanAllQuery
+                + ", Errors: " + result.errors());
+            }
+          }
+        })
+        .flatMap(new Func1<AsyncN1qlQueryResult, Observable<AsyncN1qlQueryRow>>() {
+          @Override
+          public Observable<AsyncN1qlQueryRow> call(AsyncN1qlQueryResult result) {
+            return result.rows();
+          }
+        })
+        .flatMap(new Func1<AsyncN1qlQueryRow, Observable<RawJsonDocument>>() {
+          @Override
+          public Observable<RawJsonDocument> call(AsyncN1qlQueryRow row) {
+            String id = new String(row.byteValue());
+            return bucket.async().get(
+              id.substring(id.indexOf(table + SEPARATOR), id.lastIndexOf('"')),
+              RawJsonDocument.class
+            );
+          }
+        })
+        .map(new Func1<RawJsonDocument, HashMap<String, ByteIterator>>() {
+          @Override
+          public HashMap<String, ByteIterator> call(RawJsonDocument document) {
+            HashMap<String, ByteIterator> tuple = new HashMap<String, ByteIterator>();
+            decode(document.content(), null, tuple);
+            return tuple;
+          }
+        })
+        .toBlocking()
+        .forEach(new Action1<HashMap<String, ByteIterator>>() {
+          @Override
+          public void call(HashMap<String, ByteIterator> tuple) {
+            data.add(tuple);
+          }
+        });
+
+    result.addAll(data);
+    return Status.OK;
+  }
+
+  /**
+   * Performs the {@link #scan(String, String, int, Set, Vector)} operation via N1QL, loading only a subset of the fields.
+   *
+   * @param table The name of the table
+   * @param startkey The record key of the first record to read.
+   * @param recordcount The number of records to read
+   * @param fields The list of fields to read, or null for all of them
+   * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
+   * @return The result of the operation.
+   */
+  private Status scanSpecificFields(final String table, final String startkey, final int recordcount,
+      final Set<String> fields, final Vector<HashMap<String, ByteIterator>> result) {
+    String scanSpecQuery = "SELECT " + joinFields(fields) + " FROM `" + bucketName
+        + "` WHERE meta().id >= '$1' LIMIT $2";
+    N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
+        scanSpecQuery,
+        JsonArray.from(formatId(table, startkey), recordcount),
+        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
+    ));
+
+    if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
+      throw new RuntimeException("Error while parsing N1QL Result. Query: " + scanSpecQuery
+        + ", Errors: " + queryResult.errors());
+    }
+
+    boolean allFields = fields == null || fields.isEmpty();
+    result.ensureCapacity(recordcount);
+
+    for (N1qlQueryRow row : queryResult) {
+      JsonObject value = row.value();
+      if (allFields) {
+        value = value.getObject(bucketName); // SELECT * results are nested under the bucket name
+      }
+      Set<String> f = allFields ? value.getNames() : fields;
+      HashMap<String, ByteIterator> tuple = new HashMap<String, ByteIterator>(f.size());
+      for (String field : f) {
+        tuple.put(field, new StringByteIterator(value.getString(field)));
+      }
+      result.add(tuple);
+    }
+    return Status.OK;
+  }
+
+  /**
+   * Helper method to block on the response, depending on the property set.
+   *
+   * By default, since YCSB is synchronous, the code always waits for the operation to complete. In some
+   * cases it can be useful to just "drive load" and disable the waiting. Note that when the
+   * "-p couchbase.syncMutationResponse=false" option is used, the results measured by YCSB can basically
+   * be thrown away. Still helpful sometimes during load phases to speed them up :)
+   *
+   * @param input the async input observable.
+   */
+  private void waitForMutationResponse(final Observable<? extends Document<?>> input) {
+    if (!syncMutResponse) {
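+      // Fire and forget: kick off the mutation but ignore its outcome, so the
+      // latencies YCSB reports for this op do not reflect actual completion.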
+      input.subscribe(new Subscriber<Document<?>>() {
+        @Override
+        public void onCompleted() {
+        }
+
+        @Override
+        public void onError(Throwable e) {
+        }
+
+        @Override
+        public void onNext(Document<?> document) {
+        }
+      });
+    } else {
+      Blocking.blockForSingle(input, kvTimeout, TimeUnit.MILLISECONDS);
+    }
+  }
+
+  /**
+   * Helper method to turn the values into a String, used with {@link #updateN1ql(String, HashMap)}.
+   *
+   * @param values the values to encode.
+   * @return the encoded string.
+   */
+  private static String encodeN1qlFields(final HashMap<String, ByteIterator> values) {
+    if (values.isEmpty()) {
+      return "";
+    }
+
+    StringBuilder sb = new StringBuilder();
+    for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
+      String raw = entry.getValue().toString();
+      String escaped = raw.replace("\"", "\\\"").replace("\'", "\\\'");
+      sb.append(entry.getKey()).append("=\"").append(escaped).append("\" ");
+    }
+    String toReturn = sb.toString();
+    return toReturn.substring(0, toReturn.length() - 1);
+  }
+
+  /**
+   * Helper method to turn the map of values into a {@link JsonObject} for further use.
+   *
+   * @param values the values to transform.
+   * @return the created json object.
+   */
+  private static JsonObject valuesToJsonObject(final HashMap<String, ByteIterator> values) {
+    JsonObject result = JsonObject.create();
+    for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
+      result.put(entry.getKey(), entry.getValue().toString());
+    }
+    return result;
+  }
+
+  /**
+   * Helper method to join the set of fields into a String suitable for N1QL.
+   *
+   * @param fields the fields to join.
+   * @return the joined fields as a String.
+   */
+  private static String joinFields(final Set<String> fields) {
+    if (fields == null || fields.isEmpty()) {
+      return "*";
+    }
+    StringBuilder builder = new StringBuilder();
+    for (String f : fields) {
+      builder.append("`").append(f).append("`").append(",");
+    }
+    String toReturn = builder.toString();
+    return toReturn.substring(0, toReturn.length() - 1);
+  }
+
+  /**
+   * Helper method to turn the prefix and key into a proper document ID.
+   *
+   * @param prefix the prefix (table).
+   * @param key the key itself.
+   * @return a document ID that can be used with Couchbase.
+   */
+  private static String formatId(final String prefix, final String key) {
+    return prefix + SEPARATOR + key;
+  }
+
+  /**
+   * Helper method to parse the "ReplicateTo" property on startup.
+   *
+   * @param property the property to parse.
+   * @return the parsed setting.
+   */
+  private static ReplicateTo parseReplicateTo(final String property) throws DBException {
+    int value = Integer.parseInt(property);
+
+    switch (value) {
+    case 0:
+      return ReplicateTo.NONE;
+    case 1:
+      return ReplicateTo.ONE;
+    case 2:
+      return ReplicateTo.TWO;
+    case 3:
+      return ReplicateTo.THREE;
+    default:
+      throw new DBException("\"couchbase.replicateTo\" must be between 0 and 3");
+    }
+  }
+
+  /**
+   * Helper method to parse the "PersistTo" property on startup.
+   *
+   * @param property the property to parse.
+   * @return the parsed setting.
+   */
+  private static PersistTo parsePersistTo(final String property) throws DBException {
+    int value = Integer.parseInt(property);
+
+    switch (value) {
+    case 0:
+      return PersistTo.NONE;
+    case 1:
+      return PersistTo.ONE;
+    case 2:
+      return PersistTo.TWO;
+    case 3:
+      return PersistTo.THREE;
+    case 4:
+      return PersistTo.FOUR;
+    default:
+      throw new DBException("\"couchbase.persistTo\" must be between 0 and 4");
+    }
+  }
+
+  /**
+   * Decode the String from server and pass it into the decoded destination.
+   *
+   * @param source the loaded object.
+   * @param fields the fields to check.
+   * @param dest the result passed back to YCSB.
+   */
+  private void decode(final String source, final Set<String> fields,
+                      final HashMap<String, ByteIterator> dest) {
+    try {
+      JsonNode json = JacksonTransformers.MAPPER.readTree(source);
+      boolean checkFields = fields != null && !fields.isEmpty();
+      for (Iterator<Map.Entry<String, JsonNode>> jsonFields = json.fields(); jsonFields.hasNext();) {
+        Map.Entry<String, JsonNode> jsonField = jsonFields.next();
+        String name = jsonField.getKey();
+        if (checkFields && fields.contains(name)) {
+          continue;
+        }
+        JsonNode jsonValue = jsonField.getValue();
+        if (jsonValue != null && !jsonValue.isNull()) {
+          dest.put(name, new StringByteIterator(jsonValue.asText()));
+        }
+      }
+    } catch (Exception e) {
+      throw new RuntimeException("Could not decode JSON");
+    }
+  }
+
+  /**
+   * Encode the source into a String for storage.
+   *
+   * @param source the source value.
+   * @return the encoded string.
+   */
+  private String encode(final HashMap<String, ByteIterator> source) {
+    HashMap<String, String> stringMap = StringByteIterator.getStringMap(source);
+    ObjectNode node = JacksonTransformers.MAPPER.createObjectNode();
+    for (Map.Entry<String, String> pair : stringMap.entrySet()) {
+      node.put(pair.getKey(), pair.getValue());
+    }
+    JsonFactory jsonFactory = new JsonFactory();
+    Writer writer = new StringWriter();
+    try {
+      JsonGenerator jsonGenerator = jsonFactory.createGenerator(writer);
+      JacksonTransformers.MAPPER.writeTree(jsonGenerator, node);
+    } catch (Exception e) {
+      throw new RuntimeException("Could not encode JSON value");
+    }
+    return writer.toString();
+  }
+}
+
+/**
+ * Factory for the {@link BackoffSelectStrategy} to be used with boosting.
+ */
+class BackoffSelectStrategyFactory implements SelectStrategyFactory {
+  @Override
+  public SelectStrategy newSelectStrategy() {
+    return new BackoffSelectStrategy();
+  }
+}
+
+/**
+ * Custom IO select strategy which trades CPU for throughput, used with the boost setting.
+ */
+class BackoffSelectStrategy implements SelectStrategy {
+
+  private int counter = 0;
+
+  @Override
+  public int calculateStrategy(final IntSupplier supplier, final boolean hasTasks) throws Exception {
+    int selectNowResult = supplier.get();
+    if (hasTasks || selectNowResult != 0) {
+      counter = 0;
+      return selectNowResult;
+    }
+    counter++;
+
+    if (counter > 2000) {
+      LockSupport.parkNanos(1);
+    } else if (counter > 3000) {
+      Thread.yield();
+    } else if (counter > 4000) {
+      LockSupport.parkNanos(1000);
+    } else if (counter > 5000) {
+      // defer to blocking select
+      counter = 0;
+      return SelectStrategy.SELECT;
+    }
+
+    return SelectStrategy.CONTINUE;
+  }
+}
diff --git a/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/package-info.java b/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/package-info.java
new file mode 100644
index 0000000000000000000000000000000000000000..0eb3b3992b30adf338223870471a1e75ed82ab3b
--- /dev/null
+++ b/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2015 - 2016 YCSB contributors. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+
+/**
+ * The YCSB binding for <a href="http://www.couchbase.com/">Couchbase</a>, new driver.
+ */
+package com.yahoo.ycsb.db.couchbase2;
+
diff --git a/distribution/pom.xml b/distribution/pom.xml
index a6f219432412eab93878dbbc07c70b506fdd2bd3..d3888c9e9daadbe9e8bab3e7812ed759caef1340 100644
--- a/distribution/pom.xml
+++ b/distribution/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>root</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
   </parent>
 
   <artifactId>ycsb</artifactId>
@@ -49,6 +49,11 @@ LICENSE file.
       <artifactId>aerospike-binding</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>com.yahoo.ycsb</groupId>
+      <artifactId>asynchbase-binding</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>com.yahoo.ycsb</groupId>
       <artifactId>cassandra-binding</artifactId>
@@ -59,6 +64,11 @@ LICENSE file.
       <artifactId>couchbase-binding</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>com.yahoo.ycsb</groupId>
+      <artifactId>couchbase2-binding</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>com.yahoo.ycsb</groupId>
       <artifactId>dynamodb-binding</artifactId>
@@ -79,6 +89,11 @@ LICENSE file.
       <artifactId>googledatastore-binding</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>com.yahoo.ycsb</groupId>
+      <artifactId>googlebigtable-binding</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>com.yahoo.ycsb</groupId>
       <artifactId>hbase094-binding</artifactId>
@@ -144,6 +159,11 @@ LICENSE file.
       <artifactId>redis-binding</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>com.yahoo.ycsb</groupId>
+      <artifactId>riak-binding</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>com.yahoo.ycsb</groupId>
       <artifactId>s3-binding</artifactId>
diff --git a/dynamodb/pom.xml b/dynamodb/pom.xml
index 593371a649b90929a99c90851e43ef6fb8233fa5..3aa162c60acf9baee121dfb8e4d82b80618ad1f7 100644
--- a/dynamodb/pom.xml
+++ b/dynamodb/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
diff --git a/elasticsearch/README.md b/elasticsearch/README.md
index 344ea9c91ac73a509909516688dcec0a2eaec8b9..157ccec0c04fe4688da30af531b5fbdc3db4703d 100644
--- a/elasticsearch/README.md
+++ b/elasticsearch/README.md
@@ -43,15 +43,12 @@ For further configuration see below:
 The default setting for the Elasticsearch node that is created is as follows:
 
     cluster.name=es.ycsb.cluster
-    node.local=true
-    path.data=$TEMP_DIR/esdata
-    discovery.zen.ping.multicast.enabled=false
-    index.mapping._id.indexed=true
-    index.gateway.type=none
-    gateway.type=none
-    index.number_of_shards=1
-    index.number_of_replicas=0
     es.index.key=es.ycsb
+    es.number_of_shards=1
+    es.number_of_replicas=0
+    es.remote=false
+    es.newdb=false
+    es.hosts.list=localhost:9300 (only applies if es.remote=true)
 
 ### Custom Configuration
 If you wish to customize the settings used to create the Elasticsearch node
@@ -66,25 +63,17 @@ pass it into the Elasticsearch client:
 
     ./bin/ycsb run elasticsearch -P workloads/workloada -P myproperties.data -s
 
-
-If you wish to use a in-memory store type rather than the default disk store add 
-the following properties to your custom properties file. For a large number of 
-insert operations insure that you have sufficient memory on your test system 
-otherwise you will run out of memory.
-
-    index.store.type=memory
-    index.store.fs.memory.enabled=true
-    cache.memory.small_buffer_size=4mb
-    cache.memory.large_cache_size=1024mb
-
 If you wish to change the default index name you can set the following property:
 
     es.index.key=my_index_key
 
-### Troubleshoot
-If you encounter error messages such as :
-"Primary shard is not active or isn't assigned is a known node."
+If you wish to run against a remote cluster you can set the following property:
+
+    es.remote=true
+
+By default this will use localhost:9300 as a seed node to discover the cluster.
+You can also specify
 
-Try removing /tmp/esdata/ folder. 
-    rm -rf /tmp/esdata
+    es.hosts.list=(\w+:\d+)+
 
+(a comma-separated list of host/port pairs) to change this.
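+
+For example, with two hypothetical nodes:
+
+    es.hosts.list=node1.example.com:9300,node2.example.com:9300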
diff --git a/elasticsearch/pom.xml b/elasticsearch/pom.xml
index f285dab2b47525458b78aa87c6325b316259c27f..90598be71e5486671b60a6f9325b52df9d64286f 100644
--- a/elasticsearch/pom.xml
+++ b/elasticsearch/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
     <parent>
         <groupId>com.yahoo.ycsb</groupId>
         <artifactId>binding-parent</artifactId>
-        <version>0.9.0-SNAPSHOT</version>
+        <version>0.10.0-SNAPSHOT</version>
         <relativePath>../binding-parent</relativePath>
     </parent>
 
@@ -29,7 +29,7 @@ LICENSE file.
     <name>Elasticsearch Binding</name>
     <packaging>jar</packaging>
     <properties>
-        <elasticsearch-version>2.2.0</elasticsearch-version>
+        <elasticsearch-version>2.3.2</elasticsearch-version>
     </properties>
     <dependencies>
         <dependency>
diff --git a/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java b/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java
index 1d79e3c2897eb79e25363ba15c81f7bd00d818d6..6a95d9cefc64c905ebd7d3a6b2c559bf20e5b2f6 100644
--- a/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java
+++ b/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java
@@ -22,7 +22,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
 import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
 
-
 import com.yahoo.ycsb.ByteIterator;
 import com.yahoo.ycsb.DB;
 import com.yahoo.ycsb.DBException;
@@ -30,6 +29,8 @@ import com.yahoo.ycsb.Status;
 import com.yahoo.ycsb.StringByteIterator;
 
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.delete.DeleteResponse;
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.client.Client;
@@ -57,19 +58,19 @@ import java.util.Vector;
  * Default properties to set:
  * </p>
  * <ul>
- * <li>es.cluster.name = es.ycsb.cluster
- * <li>es.client = true
+ * <li>cluster.name = es.ycsb.cluster
  * <li>es.index.key = es.ycsb
+ * <li>es.number_of_shards = 1
+ * <li>es.number_of_replicas = 0
  * </ul>
- *
- * @author Sharmarke Aden
- *
  */
 public class ElasticsearchClient extends DB {
 
-  public static final String DEFAULT_CLUSTER_NAME = "es.ycsb.cluster";
-  public static final String DEFAULT_INDEX_KEY = "es.ycsb";
-  public static final String DEFAULT_REMOTE_HOST = "localhost:9300";
+  private static final String DEFAULT_CLUSTER_NAME = "es.ycsb.cluster";
+  private static final String DEFAULT_INDEX_KEY = "es.ycsb";
+  private static final String DEFAULT_REMOTE_HOST = "localhost:9300";
+  private static final int NUMBER_OF_SHARDS = 1;
+  private static final int NUMBER_OF_REPLICAS = 0;
   private Node node;
   private Client client;
   private String indexKey;
@@ -83,32 +84,26 @@ public class ElasticsearchClient extends DB {
   public void init() throws DBException {
     Properties props = getProperties();
     this.indexKey = props.getProperty("es.index.key", DEFAULT_INDEX_KEY);
-    String clusterName =
-        props.getProperty("cluster.name", DEFAULT_CLUSTER_NAME);
+
+    int numberOfShards = parseIntegerProperty(props, "es.number_of_shards", NUMBER_OF_SHARDS);
+    int numberOfReplicas = parseIntegerProperty(props, "es.number_of_replicas", NUMBER_OF_REPLICAS);
+
     // Check if transport client needs to be used (To connect to multiple
     // elasticsearch nodes)
-    remoteMode = Boolean
-        .parseBoolean(props.getProperty("elasticsearch.remote", "false"));
-    Boolean newdb =
-        Boolean.parseBoolean(props.getProperty("elasticsearch.newdb", "false"));
+    remoteMode = Boolean.parseBoolean(props.getProperty("es.remote", "false"));
+    Boolean newdb = Boolean.parseBoolean(props.getProperty("es.newdb", "false"));
     Builder settings = Settings.settingsBuilder()
-        .put("node.local", "true")
-        .put("path.data", System.getProperty("java.io.tmpdir") + "/esdata")
-        .put("discovery.zen.ping.multicast.enabled", "false")
-        .put("index.mapping._id.indexed", "true")
-        .put("index.gateway.type", "none")
-        .put("index.number_of_shards", "1")
-        .put("index.number_of_replicas", "0")
+        .put("cluster.name", DEFAULT_CLUSTER_NAME)
+        .put("node.local", Boolean.toString(!remoteMode))
         .put("path.home", System.getProperty("java.io.tmpdir"));
 
     // if properties file contains elasticsearch user defined properties
     // add it to the settings file (will overwrite the defaults).
     settings.put(props);
-    System.out.println(
-        "Elasticsearch starting node = " + settings.get("cluster.name"));
-    System.out
-        .println("Elasticsearch node data path = " + settings.get("path.data"));
-    System.out.println("Elasticsearch Remote Mode = " + remoteMode);
+    final String clusterName = settings.get("cluster.name");
+    System.err.println("Elasticsearch starting node = " + clusterName);
+    System.err.println("Elasticsearch node path.home = " + settings.get("path.home"));
+    System.err.println("Elasticsearch Remote Mode = " + remoteMode);
     // Remote mode support for connecting to remote elasticsearch cluster
     if (remoteMode) {
       settings.put("client.transport.sniff", true)
@@ -116,13 +111,9 @@ public class ElasticsearchClient extends DB {
           .put("client.transport.ping_timeout", "30s")
           .put("client.transport.nodes_sampler_interval", "30s");
       // Default it to localhost:9300
-      String[] nodeList =
-          props.getProperty("elasticsearch.hosts.list", DEFAULT_REMOTE_HOST)
-              .split(",");
-      System.out.println("Elasticsearch Remote Hosts = "
-          + props.getProperty("elasticsearch.hosts.list", DEFAULT_REMOTE_HOST));
-      TransportClient tClient = TransportClient.builder()
-                                  .settings(settings).build();
+      String[] nodeList = props.getProperty("es.hosts.list", DEFAULT_REMOTE_HOST).split(",");
+      System.out.println("Elasticsearch Remote Hosts = " + props.getProperty("es.hosts.list", DEFAULT_REMOTE_HOST));
+      TransportClient tClient = TransportClient.builder().settings(settings).build();
       for (String h : nodeList) {
         String[] nodes = h.split(":");
         try {
@@ -143,21 +134,29 @@ public class ElasticsearchClient extends DB {
       client = node.client();
     }
 
-    //wait for shards to be ready
-    client.admin().cluster()
-      .health(new ClusterHealthRequest("lists").waitForActiveShards(1))
-      .actionGet();
-    if (newdb) {
+    final boolean exists =
+            client.admin().indices()
+                    .exists(Requests.indicesExistsRequest(indexKey)).actionGet()
+                    .isExists();
+    if (exists && newdb) {
       client.admin().indices().prepareDelete(indexKey).execute().actionGet();
-      client.admin().indices().prepareCreate(indexKey).execute().actionGet();
-    } else {
-      boolean exists = client.admin().indices()
-          .exists(Requests.indicesExistsRequest(indexKey)).actionGet()
-          .isExists();
-      if (!exists) {
-        client.admin().indices().prepareCreate(indexKey).execute().actionGet();
-      }
     }
+    if (!exists || newdb) {
+      client.admin().indices().create(
+              new CreateIndexRequest(indexKey)
+                      .settings(
+                              Settings.builder()
+                                      .put("index.number_of_shards", numberOfShards)
+                                      .put("index.number_of_replicas", numberOfReplicas)
+                                      .put("index.mapping._id.indexed", true)
+                      )).actionGet();
+    }
+    client.admin().cluster().health(new ClusterHealthRequest().waitForGreenStatus()).actionGet();
+  }
+
+  private int parseIntegerProperty(Properties properties, String key, int defaultValue) {
+    String value = properties.getProperty(key);
+    return value == null ? defaultValue : Integer.parseInt(value);
   }
 
   @Override
@@ -187,26 +186,23 @@ public class ElasticsearchClient extends DB {
    *         description for a discussion of error codes.
    */
   @Override
-  public Status insert(String table, String key,
-      HashMap<String, ByteIterator> values) {
+  public Status insert(String table, String key, HashMap<String, ByteIterator> values) {
     try {
       final XContentBuilder doc = jsonBuilder().startObject();
 
-      for (Entry<String, String> entry : StringByteIterator.getStringMap(values)
-          .entrySet()) {
+      for (Entry<String, String> entry : StringByteIterator.getStringMap(values).entrySet()) {
         doc.field(entry.getKey(), entry.getValue());
       }
 
       doc.endObject();
 
-      client.prepareIndex(indexKey, table, key).setSource(doc).execute()
-          .actionGet();
+      client.prepareIndex(indexKey, table, key).setSource(doc).execute().actionGet();
 
       return Status.OK;
     } catch (Exception e) {
       e.printStackTrace();
+      return Status.ERROR;
     }
-    return Status.ERROR;
   }
 
   /**
@@ -222,12 +218,16 @@ public class ElasticsearchClient extends DB {
   @Override
   public Status delete(String table, String key) {
     try {
-      client.prepareDelete(indexKey, table, key).execute().actionGet();
-      return Status.OK;
+      DeleteResponse response = client.prepareDelete(indexKey, table, key).execute().actionGet();
+      if (response.isFound()) {
+        return Status.OK;
+      } else {
+        return Status.NOT_FOUND;
+      }
     } catch (Exception e) {
       e.printStackTrace();
+      return Status.ERROR;
     }
-    return Status.ERROR;
   }
 
   /**
@@ -245,11 +245,9 @@ public class ElasticsearchClient extends DB {
    * @return Zero on success, a non-zero error code on error or "not found".
    */
   @Override
-  public Status read(String table, String key, Set<String> fields,
-      HashMap<String, ByteIterator> result) {
+  public Status read(String table, String key, Set<String> fields, HashMap<String, ByteIterator> result) {
     try {
-      final GetResponse response =
-          client.prepareGet(indexKey, table, key).execute().actionGet();
+      final GetResponse response = client.prepareGet(indexKey, table, key).execute().actionGet();
 
       if (response.isExists()) {
         if (fields != null) {
@@ -264,11 +262,13 @@ public class ElasticsearchClient extends DB {
           }
         }
         return Status.OK;
+      } else {
+        return Status.NOT_FOUND;
       }
     } catch (Exception e) {
       e.printStackTrace();
+      return Status.ERROR;
     }
-    return Status.ERROR;
   }
 
   /**
@@ -286,28 +286,25 @@ public class ElasticsearchClient extends DB {
    *         description for a discussion of error codes.
    */
   @Override
-  public Status update(String table, String key,
-      HashMap<String, ByteIterator> values) {
+  public Status update(String table, String key, HashMap<String, ByteIterator> values) {
     try {
-      final GetResponse response =
-          client.prepareGet(indexKey, table, key).execute().actionGet();
+      final GetResponse response = client.prepareGet(indexKey, table, key).execute().actionGet();
 
       if (response.isExists()) {
-        for (Entry<String, String> entry : StringByteIterator
-            .getStringMap(values).entrySet()) {
+        for (Entry<String, String> entry : StringByteIterator.getStringMap(values).entrySet()) {
           response.getSource().put(entry.getKey(), entry.getValue());
         }
 
-        client.prepareIndex(indexKey, table, key)
-            .setSource(response.getSource()).execute().actionGet();
+        client.prepareIndex(indexKey, table, key).setSource(response.getSource()).execute().actionGet();
 
         return Status.OK;
+      } else {
+        return Status.NOT_FOUND;
       }
-
     } catch (Exception e) {
       e.printStackTrace();
+      return Status.ERROR;
     }
-    return Status.ERROR;
   }
 
   /**
@@ -329,8 +326,12 @@ public class ElasticsearchClient extends DB {
    *         description for a discussion of error codes.
    */
   @Override
-  public Status scan(String table, String startkey, int recordcount,
-      Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
+  public Status scan(
+          String table,
+          String startkey,
+          int recordcount,
+          Set<String> fields,
+          Vector<HashMap<String, ByteIterator>> result) {
     try {
       final RangeQueryBuilder rangeQuery = rangeQuery("_id").gte(startkey);
       final SearchResponse response = client.prepareSearch(indexKey)
@@ -343,20 +344,17 @@ public class ElasticsearchClient extends DB {
       HashMap<String, ByteIterator> entry;
 
       for (SearchHit hit : response.getHits()) {
-        entry = new HashMap<String, ByteIterator>(fields.size());
-
+        entry = new HashMap<>(fields.size());
         for (String field : fields) {
-          entry.put(field,
-              new StringByteIterator((String) hit.getSource().get(field)));
+          entry.put(field, new StringByteIterator((String) hit.getSource().get(field)));
         }
-
         result.add(entry);
       }
 
       return Status.OK;
     } catch (Exception e) {
       e.printStackTrace();
+      return Status.ERROR;
     }
-    return Status.ERROR;
   }
 }
diff --git a/elasticsearch/src/test/java/com/yahoo/ycsb/db/ElasticsearchClientTest.java b/elasticsearch/src/test/java/com/yahoo/ycsb/db/ElasticsearchClientTest.java
index 1a80cca37a0137a7c44d39927975e9d7053f6135..69e52ff678f2c29a39fd1e433293f893db2a3e58 100644
--- a/elasticsearch/src/test/java/com/yahoo/ycsb/db/ElasticsearchClientTest.java
+++ b/elasticsearch/src/test/java/com/yahoo/ycsb/db/ElasticsearchClientTest.java
@@ -38,10 +38,6 @@ import java.util.HashMap;
 import java.util.Set;
 import java.util.Vector;
 
-/**
- *
- * @author saden
- */
 public class ElasticsearchClientTest {
 
     protected final static ElasticsearchClient instance = new ElasticsearchClient();
diff --git a/geode/pom.xml b/geode/pom.xml
index 40111413e57ee824c8b717296c6228c5719ebb27..2ac239793ade4b2b90a27ab86dabc452a329964e 100644
--- a/geode/pom.xml
+++ b/geode/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   
@@ -36,7 +36,7 @@ LICENSE file.
   <dependencies>
     <dependency>
       <groupId>org.apache.geode</groupId>
-      <artifactId>gemfire-core</artifactId>
+      <artifactId>geode-core</artifactId>
       <version>${geode.version}</version>
     </dependency>
     <dependency>
diff --git a/googlebigtable/README.md b/googlebigtable/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3938b525eaa3fcc02ca06e8033cd402214ede08a
--- /dev/null
+++ b/googlebigtable/README.md
@@ -0,0 +1,80 @@
+<!--
+Copyright (c) 2016 YCSB contributors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file except in compliance with the License. You
+may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License. See accompanying
+LICENSE file.
+-->
+
+# Google Bigtable Driver for YCSB
+
+This driver provides a YCSB workload binding for Google's hosted Bigtable, the inspiration for a number of key-value stores like HBase and Cassandra. The Bigtable Java client provides both Protobuf based GRPC and HBase client APIs. This binding implements the Protobuf API for testing the native client. To test Bigtable using the HBase API, see the `hbase10` binding.
+
+## Quickstart
+
+### 1. Setup a Bigtable Cluster
+
+Login to the Google Cloud Console and follow the [Creating Cluster](https://cloud.google.com/bigtable/docs/creating-cluster) steps. Make a note of your cluster name, zone and project ID.
+
+### 2. Launch the Bigtable Shell
+
+From the Cloud Console, launch a shell and follow the [Quickstart](https://cloud.google.com/bigtable/docs/quickstart) up to step 4 where you launch the HBase shell.
+
+### 3. Create a Table
+
+For best results, use the pre-splitting strategy recommended in [HBASE-4163](https://issues.apache.org/jira/browse/HBASE-4163):
+
+```
+hbase(main):001:0> n_splits = 200 # HBase recommends (10 * number of regionservers)
+hbase(main):002:0> create 'usertable', 'cf', {SPLITS => (1..n_splits).map {|i| "user#{1000+i*(9999-1000)/n_splits}"}}
+```
+
+Make a note of the column family; in this example it's `cf`.
+
+### 4. Fetch the Proper ALPN Boot Jar
+
+The Bigtable protocol uses HTTP/2, which requires an ALPN protocol negotiation implementation. The implementation must be loaded at JVM startup, before any attempt to connect to the cluster. If you're using Java 7 or 8, use this [Jetty Version Table](http://www.eclipse.org/jetty/documentation/current/alpn-chapter.html#alpn-versions) to determine the version appropriate for your JVM (ALPN is included in JDK 9+). Download the proper jar from [Maven](http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22org.mortbay.jetty.alpn%22%20AND%20a%3A%22alpn-boot%22) and place it somewhere on your system.
+
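+As a sketch, assuming Maven Central's standard path layout (substitute a real version from the Jetty table above for the placeholder):
+
+```
+curl -O https://repo1.maven.org/maven2/org/mortbay/jetty/alpn/alpn-boot/<ALPN_VERSION>/alpn-boot-<ALPN_VERSION>.jar
+```
+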
+### 5. Download JSON Credentials
+
+Follow these instructions for [Generating a JSON key](https://cloud.google.com/bigtable/docs/installing-hbase-shell#service-account) and save it to your host.
+
+### 6. Load a Workload
+
+Switch to the root of the YCSB repo, choose the workload you want to run, and `load` it first. On the CLI you must provide the column family, the cluster properties and the ALPN jar to load.
+
+```
+bin/ycsb load googlebigtable -p columnfamily=cf -p google.bigtable.project.id=<PROJECT_ID> -p google.bigtable.cluster.name=<CLUSTER> -p google.bigtable.zone.name=<ZONE> -p google.bigtable.auth.service.account.enable=true -p google.bigtable.auth.json.keyfile=<PATH_TO_JSON_KEY> -jvm-args='-Xbootclasspath/p:<PATH_TO_ALPN_JAR>' -P workloads/workloada
+```
+
+Make sure to replace the variables in the angle brackets above with the proper values from your cluster. Additional configuration parameters are available below.
+
+The `load` step only executes inserts into the datastore. After loading data, run the same workload to mix reads with writes.
+
+```
+bin/ycsb run googlebigtable -p columnfamily=cf -p google.bigtable.project.id=<PROJECT_ID> -p google.bigtable.cluster.name=<CLUSTER> -p google.bigtable.zone.name=<ZONE> -p google.bigtable.auth.service.account.enable=true -p google.bigtable.auth.json.keyfile=<PATH_TO_JSON_KEY> -jvm-args='-Xbootclasspath/p:<PATH_TO_ALPN_JAR>' -P workloads/workloada
+```
+
+## Configuration Options
+
+The following options can be configured on the CLI (via the `-p` parameter) or in hbase-site.xml (add the HBase config directory to YCSB's class path on the CLI); a minimal `hbase-site.xml` sketch follows the list. Check the [Cloud Bigtable Client](https://github.com/manolama/cloud-bigtable-client) project for additional tuning parameters.
+
+* `columnfamily`: (Required) The Bigtable column family to target.
+* `google.bigtable.project.id`: (Required) The ID of a Bigtable project.
+* `google.bigtable.cluster.name`: (Required) The name of a Bigtable cluster.
+* `google.bigtable.zone.name`: (Required) Zone where the Bigtable cluster is running.
+* `google.bigtable.auth.service.account.enable`: Whether or not to authenticate with a service account. The default is true.
+* `google.bigtable.auth.json.keyfile`: (Required) A service account key for authentication.
+* `debug`: If true, prints debug information to standard out. The default is false.
+* `clientbuffering`: Whether or not to use client side buffering and batching of write operations. This can significantly improve performance and defaults to true.
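+
+As a minimal sketch, the same options can also be placed in an `hbase-site.xml` on YCSB's class path instead of the CLI; all values below are placeholders for your own cluster's settings:
+
+```
+<configuration>
+  <!-- Placeholder values; replace with your project, cluster and zone. -->
+  <property>
+    <name>google.bigtable.project.id</name>
+    <value>my-project-id</value>
+  </property>
+  <property>
+    <name>google.bigtable.cluster.name</name>
+    <value>my-cluster</value>
+  </property>
+  <property>
+    <name>google.bigtable.zone.name</name>
+    <value>my-zone</value>
+  </property>
+</configuration>
+```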
diff --git a/googlebigtable/pom.xml b/googlebigtable/pom.xml
new file mode 100644
index 0000000000000000000000000000000000000000..802cd6ee81ad9786a7288d477857e8f176429446
--- /dev/null
+++ b/googlebigtable/pom.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2016 YCSB contributors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file except in compliance with the License. You
+may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License. See accompanying
+LICENSE file.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>com.yahoo.ycsb</groupId>
+    <artifactId>binding-parent</artifactId>
+    <version>0.10.0-SNAPSHOT</version>
+    <relativePath>../binding-parent/</relativePath>
+  </parent>
+
+  <artifactId>googlebigtable-binding</artifactId>
+  <name>Google Cloud Bigtable Binding</name>
+  <packaging>jar</packaging>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.cloud.bigtable</groupId>
+      <artifactId>bigtable-hbase-1.0</artifactId>
+      <version>${googlebigtable.version}</version>
+    </dependency>
+    
+    <dependency>
+      <groupId>com.yahoo.ycsb</groupId>
+      <artifactId>core</artifactId>
+      <version>${project.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    
+  </dependencies>
+</project>
\ No newline at end of file
diff --git a/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java b/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java
new file mode 100644
index 0000000000000000000000000000000000000000..d0d21dda4b22e796a18d0918846f7afb8bce8842
--- /dev/null
+++ b/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java
@@ -0,0 +1,445 @@
+/**
+ * Copyright (c) 2016 YCSB contributors. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+package com.yahoo.ycsb.db;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Properties;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.util.Set;
+import java.util.Vector;
+import java.util.concurrent.ExecutionException;
+
+import com.google.bigtable.repackaged.com.google.protobuf.ByteString;
+import com.google.bigtable.repackaged.com.google.protobuf.ServiceException;
+import com.google.bigtable.v1.Column;
+import com.google.bigtable.v1.Family;
+import com.google.bigtable.v1.MutateRowRequest;
+import com.google.bigtable.v1.Mutation;
+import com.google.bigtable.v1.ReadRowsRequest;
+import com.google.bigtable.v1.Row;
+import com.google.bigtable.v1.RowFilter;
+import com.google.bigtable.v1.RowRange;
+import com.google.bigtable.v1.Mutation.DeleteFromRow;
+import com.google.bigtable.v1.Mutation.SetCell;
+import com.google.bigtable.v1.RowFilter.Chain.Builder;
+import com.google.cloud.bigtable.config.BigtableOptions;
+import com.google.cloud.bigtable.grpc.BigtableDataClient;
+import com.google.cloud.bigtable.grpc.BigtableSession;
+import com.google.cloud.bigtable.grpc.async.AsyncExecutor;
+import com.google.cloud.bigtable.grpc.async.HeapSizeManager;
+import com.google.cloud.bigtable.hbase.BigtableOptionsFactory;
+import com.google.cloud.bigtable.util.ByteStringer;
+import com.yahoo.ycsb.ByteArrayByteIterator;
+import com.yahoo.ycsb.ByteIterator;
+import com.yahoo.ycsb.DBException;
+import com.yahoo.ycsb.Status;
+
+/**
+ * Google Bigtable Proto client for YCSB framework.
+ * 
+ * Bigtable offers two APIs. These include a native Protobuf GRPC API as well as 
+ * an HBase API wrapper for the GRPC API. This client implements the Protobuf 
+ * API to test the underlying calls wrapped up in the HBase API. To use the 
+ * HBase API, see the hbase10 client binding.
+ */
+public class GoogleBigtableClient extends com.yahoo.ycsb.DB {
+  public static final Charset UTF8_CHARSET = Charset.forName("UTF8");
+  
+  /** Property names for the CLI. */
+  private static final String ASYNC_MUTATOR_MAX_MEMORY = "mutatorMaxMemory";
+  private static final String ASYNC_MAX_INFLIGHT_RPCS = "mutatorMaxInflightRPCs";
+  private static final String CLIENT_SIDE_BUFFERING = "clientbuffering";
+  
+  /** Tracks running thread counts so we know when to close the session. */ 
+  private static int threadCount = 0;
+  
+  /** This will load the hbase-site.xml config file and/or store CLI options. */
+  private static final Configuration CONFIG = HBaseConfiguration.create();
+  
+  /** Print debug information to standard out. */
+  private boolean debug = false;
+  
+  /** Global Bigtable native API objects. */ 
+  private static BigtableOptions options;
+  private static BigtableSession session;
+  
+  /** Thread local Bigtable native API objects. */
+  private BigtableDataClient client;
+  private HeapSizeManager heapSizeManager;
+  private AsyncExecutor asyncExecutor;
+  
+  /** The column family used for the workload. */
+  private byte[] columnFamilyBytes;
+  
+  /** Cache for the last table name/ID to avoid byte conversions. */
+  private String lastTable = "";
+  private byte[] lastTableBytes;
+  
+  /**
+   * If true, buffer mutations on the client. For measuring insert/update/delete 
+   * latencies, client side buffering should be disabled.
+   */
+  private boolean clientSideBuffering = false;
+
+  @Override
+  public void init() throws DBException {
+    Properties props = getProperties();
+    
+    // Defaults the user can override if needed
+    CONFIG.set("google.bigtable.auth.service.account.enable", "true");
+    
+    // make it easy on ourselves by copying all CLI properties into the config object.
+    final Iterator<Entry<Object, Object>> it = props.entrySet().iterator();
+    while (it.hasNext()) {
+      Entry<Object, Object> entry = it.next();
+      CONFIG.set((String)entry.getKey(), (String)entry.getValue());
+    }
+    
+    clientSideBuffering = getProperties().getProperty(CLIENT_SIDE_BUFFERING, "false")
+        .equals("true");
+    
+    System.err.println("Running Google Bigtable with Proto API" +
+         (clientSideBuffering ? " and client side buffering." : "."));
+    
+    synchronized (CONFIG) {
+      ++threadCount;
+      if (session == null) {
+        try {
+          options = BigtableOptionsFactory.fromConfiguration(CONFIG);
+          session = new BigtableSession(options);
+          // important to instantiate the first client here, otherwise the
+          // other threads may receive an NPE from the options when they try
+          // to read the cluster name.
+          client = session.getDataClient();
+        } catch (IOException e) {
+          throw new DBException("Error loading options from config: ", e);
+        }
+      } else {
+        client = session.getDataClient();
+      }
+      
+      if (clientSideBuffering) {
+        heapSizeManager = new HeapSizeManager(
+            Long.parseLong(
+                getProperties().getProperty(ASYNC_MUTATOR_MAX_MEMORY, 
+                    Long.toString(AsyncExecutor.ASYNC_MUTATOR_MAX_MEMORY_DEFAULT))),
+            Integer.parseInt(
+                getProperties().getProperty(ASYNC_MAX_INFLIGHT_RPCS, 
+                    Integer.toString(AsyncExecutor.MAX_INFLIGHT_RPCS_DEFAULT))));
+        asyncExecutor = new AsyncExecutor(client, heapSizeManager);
+      }
+    }
+    
+    if ((getProperties().getProperty("debug") != null)
+        && (getProperties().getProperty("debug").compareTo("true") == 0)) {
+      debug = true;
+    }
+    
+    final String columnFamily = getProperties().getProperty("columnfamily");
+    if (columnFamily == null) {
+      System.err.println("Error, must specify a columnfamily for Bigtable table");
+      throw new DBException("No columnfamily specified");
+    }
+    columnFamilyBytes = Bytes.toBytes(columnFamily);
+  }
+  
+  @Override
+  public void cleanup() throws DBException {
+    if (asyncExecutor != null) {
+      try {
+        asyncExecutor.flush();
+      } catch (IOException e) {
+        throw new DBException(e);
+      }
+    }
+    synchronized (CONFIG) {
+      --threadCount;
+      if (threadCount <= 0) {
+        try {
+          session.close();
+        } catch (IOException e) {
+          throw new DBException(e);
+        }
+      }
+    }
+  }
+  
+  @Override
+  public Status read(String table, String key, Set<String> fields,
+      HashMap<String, ByteIterator> result) {
+    if (debug) {
+      System.out.println("Doing read from Bigtable columnfamily " 
+          + new String(columnFamilyBytes));
+      System.out.println("Doing read for key: " + key);
+    }
+    
+    setTable(table);
+    
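+    // Restrict the read to the configured column family; when specific fields
+    // are requested we also chain a latest-cell-only filter and a qualifier
+    // regex so only those fields are returned.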
+    RowFilter filter = RowFilter.newBuilder()
+        .setFamilyNameRegexFilterBytes(ByteStringer.wrap(columnFamilyBytes))
+        .build();
+    if (fields != null && fields.size() > 0) {
+      Builder filterChain = RowFilter.Chain.newBuilder();
+      filterChain.addFilters(filter);
+      filterChain.addFilters(RowFilter.newBuilder()
+          .setCellsPerColumnLimitFilter(1)
+          .build());
+      int count = 0;
+      // usually "field#" so pre-alloc
+      final StringBuilder regex = new StringBuilder(fields.size() * 6);
+      for (final String field : fields) {
+        if (count++ > 0) {
+          regex.append("|");
+        }
+        regex.append(field);
+      }
+      filterChain.addFilters(RowFilter.newBuilder()
+          .setColumnQualifierRegexFilter(
+              ByteStringer.wrap(regex.toString().getBytes())));
+      filter = RowFilter.newBuilder().setChain(filterChain.build()).build();
+    }
+    
+    final ReadRowsRequest.Builder rrr = ReadRowsRequest.newBuilder()
+        .setTableNameBytes(ByteStringer.wrap(lastTableBytes))
+        .setFilter(filter)
+        .setRowKey(ByteStringer.wrap(key.getBytes()));
+    
+    List<Row> rows;
+    try {
+      rows = client.readRowsAsync(rrr.build()).get();
+      if (rows == null || rows.isEmpty()) {
+        return Status.NOT_FOUND;
+      }
+      for (final Row row : rows) {
+        for (final Family family : row.getFamiliesList()) {
+          if (Arrays.equals(family.getNameBytes().toByteArray(), columnFamilyBytes)) {
+            for (final Column column : family.getColumnsList()) {
+              // we should only have a single cell per column
+              result.put(column.getQualifier().toString(UTF8_CHARSET), 
+                  new ByteArrayByteIterator(column.getCells(0).getValue().toByteArray()));
+              if (debug) {
+                System.out.println(
+                    "Result for field: " + column.getQualifier().toString(UTF8_CHARSET)
+                        + " is: " + column.getCells(0).getValue().toString(UTF8_CHARSET));
+              }
+            }
+          }
+        }
+      }
+      
+      return Status.OK;
+    } catch (InterruptedException e) {
+      System.err.println("Interrupted during get: " + e);
+      Thread.currentThread().interrupt();
+      return Status.ERROR;
+    } catch (ExecutionException e) {
+      System.err.println("Exception during get: " + e);
+      return Status.ERROR;
+    }
+  }
+
+  @Override
+  public Status scan(String table, String startkey, int recordcount,
+      Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
+    setTable(table);
+    
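+    // Same filter construction as in read(): family regex plus, when fields
+    // are given, a latest-cell-only filter and a qualifier regex.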
+    RowFilter filter = RowFilter.newBuilder()
+        .setFamilyNameRegexFilterBytes(ByteStringer.wrap(columnFamilyBytes))
+        .build();
+    if (fields != null && fields.size() > 0) {
+      Builder filterChain = RowFilter.Chain.newBuilder();
+      filterChain.addFilters(filter);
+      filterChain.addFilters(RowFilter.newBuilder()
+          .setCellsPerColumnLimitFilter(1)
+          .build());
+      int count = 0;
+      // usually "field#" so pre-alloc
+      final StringBuilder regex = new StringBuilder(fields.size() * 6);
+      for (final String field : fields) {
+        if (count++ > 0) {
+          regex.append("|");
+        }
+        regex.append(field);
+      }
+      filterChain.addFilters(RowFilter.newBuilder()
+          .setColumnQualifierRegexFilter(
+              ByteStringer.wrap(regex.toString().getBytes())));
+      filter = RowFilter.newBuilder().setChain(filterChain.build()).build();
+    }
+    
+    final RowRange range = RowRange.newBuilder()
+        .setStartKey(ByteStringer.wrap(startkey.getBytes()))
+        .build();
+    
+    final ReadRowsRequest.Builder rrr = ReadRowsRequest.newBuilder()
+        .setTableNameBytes(ByteStringer.wrap(lastTableBytes))
+        .setFilter(filter)
+        .setRowRange(range);
+    
+    List<Row> rows;
+    try {
+      rows = client.readRowsAsync(rrr.build()).get();
+      if (rows == null || rows.isEmpty()) {
+        return Status.NOT_FOUND;
+      }
+      int numResults = 0;
+      
+      for (final Row row : rows) {
+        final HashMap<String, ByteIterator> rowResult =
+            new HashMap<String, ByteIterator>(fields != null ? fields.size() : 10);
+        
+        for (final Family family : row.getFamiliesList()) {
+          if (Arrays.equals(family.getNameBytes().toByteArray(), columnFamilyBytes)) {
+            for (final Column column : family.getColumnsList()) {
+              // we should only have a single cell per column
+              rowResult.put(column.getQualifier().toString(UTF8_CHARSET), 
+                  new ByteArrayByteIterator(column.getCells(0).getValue().toByteArray()));
+              if (debug) {
+                System.out.println(
+                    "Result for field: " + column.getQualifier().toString(UTF8_CHARSET)
+                        + " is: " + column.getCells(0).getValue().toString(UTF8_CHARSET));
+              }
+            }
+          }
+        }
+        
+        result.add(rowResult);
+        
+        numResults++;
+        if (numResults >= recordcount) { // if we've hit recordcount, bail out
+          break;
+        }
+      }
+      return Status.OK;
+    } catch (InterruptedException e) {
+      System.err.println("Interrupted during scan: " + e);
+      Thread.currentThread().interrupt();
+      return Status.ERROR;
+    } catch (ExecutionException e) {
+      System.err.println("Exception during scan: " + e);
+      return Status.ERROR;
+    }
+  }
+
+  @Override
+  public Status update(String table, String key,
+      HashMap<String, ByteIterator> values) {
+    if (debug) {
+      System.out.println("Setting up put for key: " + key);
+    }
+    
+    setTable(table);
+    
+    final MutateRowRequest.Builder rowMutation = MutateRowRequest.newBuilder();
+    rowMutation.setRowKey(ByteString.copyFromUtf8(key));
+    rowMutation.setTableNameBytes(ByteStringer.wrap(lastTableBytes));
+    
+    for (final Entry<String, ByteIterator> entry : values.entrySet()) {
+      final Mutation.Builder mutationBuilder = rowMutation.addMutationsBuilder();
+      final SetCell.Builder setCellBuilder = mutationBuilder.getSetCellBuilder();
+      
+      setCellBuilder.setFamilyNameBytes(ByteStringer.wrap(columnFamilyBytes));
+      setCellBuilder.setColumnQualifier(ByteStringer.wrap(entry.getKey().getBytes()));
+      setCellBuilder.setValue(ByteStringer.wrap(entry.getValue().toArray()));
+
+      // Bigtable timestamps are expressed in microseconds but only offer
+      // millisecond granularity, hence the millis * 1000 conversion.
+      setCellBuilder.setTimestampMicros(System.currentTimeMillis() * 1000);
+    }
+    
+    try {
+      if (clientSideBuffering) {
+        asyncExecutor.mutateRowAsync(rowMutation.build());
+      } else {
+        client.mutateRow(rowMutation.build());
+      }
+      return Status.OK;
+    } catch (ServiceException e) {
+      System.err.println("Failed to insert key: " + key + " " + e.getMessage());
+      return Status.ERROR;
+    } catch (InterruptedException e) {
+      System.err.println("Interrupted while inserting key: " + key + " " 
+          + e.getMessage());
+      Thread.currentThread().interrupt();
+      return Status.ERROR; // never get here, but let's make the compiler happy
+    }
+  }
+
+  @Override
+  public Status insert(String table, String key,
+      HashMap<String, ByteIterator> values) {
+    return update(table, key, values);
+  }
+
+  @Override
+  public Status delete(String table, String key) {
+    if (debug) {
+      System.out.println("Doing delete for key: " + key);
+    }
+    
+    setTable(table);
+    
+    final MutateRowRequest.Builder rowMutation = MutateRowRequest.newBuilder()
+        .setRowKey(ByteString.copyFromUtf8(key))
+        .setTableNameBytes(ByteStringer.wrap(lastTableBytes));
+    rowMutation.addMutationsBuilder().setDeleteFromRow(
+        DeleteFromRow.getDefaultInstance());
+    
+    try {
+      if (clientSideBuffering) {
+        asyncExecutor.mutateRowAsync(rowMutation.build());
+      } else {
+        client.mutateRow(rowMutation.build());
+      }
+      return Status.OK;
+    } catch (ServiceException e) {
+      System.err.println("Failed to delete key: " + key + " " + e.getMessage());
+      return Status.ERROR;
+    } catch (InterruptedException e) {
+      System.err.println("Interrupted while delete key: " + key + " " 
+          + e.getMessage());
+      Thread.currentThread().interrupt();
+      return Status.ERROR; // never get here, but let's make the compiler happy
+    }
+  }
+
+  /**
+   * Little helper to set the table byte array. If it's different from the last
+   * table, we reset the byte array; otherwise we reuse the existing one.
+   * @param table The table we're operating against
+   */
+  private void setTable(final String table) {
+    if (!lastTable.equals(table)) {
+      lastTable = table;
+      lastTableBytes = options
+          .getClusterName()
+          .toTableName(table)
+          .toString()
+          .getBytes();
+    }
+  }
+  
+}
\ No newline at end of file
diff --git a/googlebigtable/src/main/java/com/yahoo/ycsb/db/package-info.java b/googlebigtable/src/main/java/com/yahoo/ycsb/db/package-info.java
new file mode 100644
index 0000000000000000000000000000000000000000..f0ab9e749b7ee9133db04737dcfe0986051ab745
--- /dev/null
+++ b/googlebigtable/src/main/java/com/yahoo/ycsb/db/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2016 YCSB contributors. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+
+/**
+ * The YCSB binding for Google's <a href="https://cloud.google.com/bigtable/">
+ * Bigtable</a>.
+ */
+package com.yahoo.ycsb.db;
diff --git a/googledatastore/README.md b/googledatastore/README.md
index a6755a6522ee100c5ab1a3c3ebad088a2ba31d7b..80b6a4cf8a12d95213a1a37c7a2f42588b132b6d 100644
--- a/googledatastore/README.md
+++ b/googledatastore/README.md
@@ -20,6 +20,9 @@ LICENSE file.
 
 https://cloud.google.com/datastore/docs/concepts/overview?hl=en
 
+Please refer to the [API overview](https://cloud.google.com/datastore/docs/apis/overview) for more information on
+the Google Cloud Datastore API.
+
 ## Configure
 
     YCSB_HOME - YCSB home directory
@@ -44,7 +47,7 @@ A. Configuration and setup:
 See this link for instructions about setting up Google Cloud Datastore and
 authentication:
 
-https://cloud.google.com/datastore/docs/getstarted/start_java/
+https://cloud.google.com/datastore/docs/activate#accessing_the_datastore_api_from_another_platform
 
 After you setup your environment, you will have 3 pieces of information ready:
 - datasetId,
diff --git a/googledatastore/conf/googledatastore.properties b/googledatastore/conf/googledatastore.properties
index ac95b570c4ec4f469d6d6ceda0c6c13b3bdd9d29..408acf0d0d025f1dc419fa404633ea60f88ea133 100644
--- a/googledatastore/conf/googledatastore.properties
+++ b/googledatastore/conf/googledatastore.properties
@@ -26,7 +26,7 @@
 
 # Google Cloud Datastore's read and update APIs do not support
 # reading or updating a select subset of properties for an entity.
-# (as of version v1beta2-rev1-3.0.2)
+# (as of version v1beta3)
 # Therefore, it's recommended that you set writeallfields and readallfields
 # to true to get stable and comparable performance numbers.
 writeallfields = true
diff --git a/googledatastore/pom.xml b/googledatastore/pom.xml
index 57db3505c288e6d27c309dbd19a0bcf1ba1fbd2e..4beef6d12a218fc999591209931676e27f1741cf 100644
--- a/googledatastore/pom.xml
+++ b/googledatastore/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
@@ -31,9 +31,9 @@ LICENSE file.
 
   <dependencies>
     <dependency>
-      <groupId>com.google.apis</groupId>
-      <artifactId>google-api-services-datastore-protobuf</artifactId>
-      <version>v1beta2-rev1-3.0.2</version>
+      <groupId>com.google.cloud.datastore</groupId>
+      <artifactId>datastore-v1beta3-proto-client</artifactId>
+      <version>1.0.0-beta.1</version>
     </dependency>
     <dependency>
       <groupId>log4j</groupId>
diff --git a/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java b/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java
index 12fc0fac96ab0b442d4e1710160ab0fadb2d8842..a3f6553427d49565c61317474b3ceaaae74ab22e 100644
--- a/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java
+++ b/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java
@@ -18,15 +18,14 @@
 package com.yahoo.ycsb.db;
 
 import com.google.api.client.auth.oauth2.Credential;
-import com.google.api.services.datastore.DatastoreV1.*;
-import com.google.api.services.datastore.DatastoreV1.CommitRequest.Mode;
-import com.google.api.services.datastore.DatastoreV1.ReadOptions
-  .ReadConsistency;
-import com.google.api.services.datastore.client.Datastore;
-import com.google.api.services.datastore.client.DatastoreException;
-import com.google.api.services.datastore.client.DatastoreFactory;
-import com.google.api.services.datastore.client.DatastoreHelper;
-import com.google.api.services.datastore.client.DatastoreOptions;
+import com.google.datastore.v1beta3.*;
+import com.google.datastore.v1beta3.CommitRequest.Mode;
+import com.google.datastore.v1beta3.ReadOptions.ReadConsistency;
+import com.google.datastore.v1beta3.client.Datastore;
+import com.google.datastore.v1beta3.client.DatastoreException;
+import com.google.datastore.v1beta3.client.DatastoreFactory;
+import com.google.datastore.v1beta3.client.DatastoreHelper;
+import com.google.datastore.v1beta3.client.DatastoreOptions;
 
 import com.yahoo.ycsb.ByteIterator;
 import com.yahoo.ycsb.DB;
@@ -165,7 +164,7 @@ public class GoogleDatastoreClient extends DB {
           serviceAccountEmail + ", Private Key File Path: " + privateKeyFile);
 
       datastore = DatastoreFactory.get().create(
-          options.credential(credential).dataset(datasetId).build());
+          options.credential(credential).projectId(datasetId).build());
 
     } catch (GeneralSecurityException exception) {
       throw new DBException("Security error connecting to the datastore: " +
@@ -184,7 +183,7 @@ public class GoogleDatastoreClient extends DB {
   public Status read(String table, String key, Set<String> fields,
           HashMap<String, ByteIterator> result) {
     LookupRequest.Builder lookupRequest = LookupRequest.newBuilder();
-    lookupRequest.addKey(buildPrimaryKey(table, key));
+    lookupRequest.addKeys(buildPrimaryKey(table, key));
     lookupRequest.getReadOptionsBuilder().setReadConsistency(
         this.readConsistency);
     // Note above, datastore lookupRequest always reads the entire entity, it
@@ -219,7 +218,7 @@ public class GoogleDatastoreClient extends DB {
     Entity entity = response.getFound(0).getEntity();
     logger.debug("Read entity: " + entity.toString());
 
-    Map<String, Value> properties = DatastoreHelper.getPropertyMap(entity);
+    Map<String, Value> properties = entity.getProperties();
     Set<String> propertiesToReturn =
         (fields == null ? properties.keySet() : fields);
 
@@ -267,11 +266,11 @@ public class GoogleDatastoreClient extends DB {
 
     if (this.entityGroupingMode == EntityGroupingMode.MULTI_ENTITY_PER_GROUP) {
       // All entities are in side the same group when we are in this mode.
-      result.addPathElement(Key.PathElement.newBuilder().setKind(table).
+      result.addPath(Key.PathElement.newBuilder().setKind(table).
           setName(rootEntityName));
     }
 
-    return result.addPathElement(Key.PathElement.newBuilder().setKind(table)
+    return result.addPath(Key.PathElement.newBuilder().setKind(table)
         .setName(key));
   }
 
@@ -289,25 +288,25 @@ public class GoogleDatastoreClient extends DB {
     commitRequest.setMode(Mode.NON_TRANSACTIONAL);
 
     if (mutationType == MutationType.DELETE) {
-      commitRequest.getMutationBuilder().addDelete(datastoreKey);
+      commitRequest.addMutationsBuilder().setDelete(datastoreKey);
 
     } else {
       // If this is not for delete, build the entity.
       Entity.Builder entityBuilder = Entity.newBuilder();
       entityBuilder.setKey(datastoreKey);
       for (Entry<String, ByteIterator> val : values.entrySet()) {
-        entityBuilder.addProperty(Property.newBuilder()
-            .setName(val.getKey())
-            .setValue(Value.newBuilder()
-                .setStringValue(val.getValue().toString())));
+        entityBuilder.getMutableProperties()
+            .put(val.getKey(),
+                Value.newBuilder()
+                .setStringValue(val.getValue().toString()).build());
       }
       Entity entity = entityBuilder.build();
       logger.debug("entity built as: " + entity.toString());
 
       if (mutationType == MutationType.UPSERT) {
-        commitRequest.getMutationBuilder().addUpsert(entity);
+        commitRequest.addMutationsBuilder().setUpsert(entity);
       } else if (mutationType == MutationType.UPDATE){
-        commitRequest.getMutationBuilder().addUpdate(entity);
+        commitRequest.addMutationsBuilder().setUpdate(entity);
       } else {
         throw new RuntimeException("Impossible MutationType, code bug.");
       }
diff --git a/hbase094/pom.xml b/hbase094/pom.xml
index ca7d4c60f62525c841e16b1b98f947fedddfc4b5..1823b41f0e506129a4842ba9b525924a33e04ad3 100644
--- a/hbase094/pom.xml
+++ b/hbase094/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent/</relativePath>
   </parent>
 
diff --git a/hbase098/README.md b/hbase098/README.md
index e6a7fb4158f64c7d3c19e2c7e4c96a94a7cb612c..83c3c7a084743435736c88c6e840cc44e9829690 100644
--- a/hbase098/README.md
+++ b/hbase098/README.md
@@ -1,5 +1,5 @@
 <!--
-Copyright (c) 2015 YCSB contributors. All rights reserved.
+Copyright (c) 2015-2016 YCSB contributors. All rights reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License"); you
 may not use this file except in compliance with the License. You
@@ -77,3 +77,6 @@ Following options can be configurable using `-p`.
 * `principal`: If testing need to be done against a secure HBase cluster using Kerberos Keytab, 
   this property can be used to pass the principal in the keytab file.
 * `keytab`: The Kerberos keytab file name and location can be passed through this property.
+* `writebuffersize`: The maximum amount, in bytes, of data to buffer on the client side before a flush is forced. The default is 12MB.
+
+Additional HBase settings should be provided in the `hbase-site.xml` file located in your `/HBASE-HOME-DIR/conf` directory. Typically this will be `/etc/hbase/conf`.
\ No newline at end of file
diff --git a/hbase098/pom.xml b/hbase098/pom.xml
index 29600f7f28c5126af931f393d04ca8da04c051f6..18efe411540245269d8ad1e1ac6e9a048216f5da 100644
--- a/hbase098/pom.xml
+++ b/hbase098/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent/</relativePath>
   </parent>
 
diff --git a/hbase10/README.md b/hbase10/README.md
index 1da5bc4369aa552e2ae158c217fc11b044e8cff8..dd01249edbf169b6ab112af34c1a157dad4da067 100644
--- a/hbase10/README.md
+++ b/hbase10/README.md
@@ -1,5 +1,5 @@
 <!--
-Copyright (c) 2015 YCSB contributors. All rights reserved.
+Copyright (c) 2015-2016 YCSB contributors. All rights reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License"); you
 may not use this file except in compliance with the License. You
@@ -16,8 +16,101 @@ LICENSE file.
 -->
 
 # HBase (1.0.x) Driver for YCSB
-This driver is a binding for the YCSB facilities to operate against a HBase 1.0.x Server cluster.
+This driver is a binding for the YCSB facilities to operate against an HBase 1.0.x Server cluster or Google's hosted Bigtable.
 To run against an HBase 0.94.x cluster, use the `hbase094` binding.
 To run against an HBase 0.98.x cluster, use the `hbase098` binding.
 
-See `hbase098/README.md` for configuration details.
+See `hbase098/README.md` for a quickstart on setting up HBase for load testing, as well as common configuration details.
+
+## Configuration Options
+In addition to those options available for the `hbase098` binding, the following options are available for the `hbase10` binding:
+
+* `durability`: Whether or not writes should be appended to the WAL. Bypassing the WAL can improve throughput, but data cannot be recovered in the event of a crash. The default is true; see the example below.
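+
+For example, a load that trades durability for throughput might look like this (illustrative, assuming the boolean form described above):
+
+```
+bin/ycsb load hbase10 -p columnfamily=cf -p durability=false -P workloads/workloada
+```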
+
+## Bigtable
+
+Google's Bigtable service provides an implementation of the HBase API for migrating existing applications. Users can perform load tests against Bigtable using this binding.
+
+### 1. Setup a Bigtable Cluster
+
+Log in to the Google Cloud Console and follow the [Creating Cluster](https://cloud.google.com/bigtable/docs/creating-cluster) steps. Make a note of your cluster name, zone, and project ID.
+
+### 2. Launch the Bigtable Shell
+
+From the Cloud Console, launch a shell and follow the [Quickstart](https://cloud.google.com/bigtable/docs/quickstart) up to step 4 where you launch the HBase shell.
+
+### 3. Create a Table
+
+For best results, use the pre-splitting strategy recommended in [HBASE-4163](https://issues.apache.org/jira/browse/HBASE-4163):
+
+```
+hbase(main):001:0> n_splits = 200 # HBase recommends (10 * number of regionservers)
+hbase(main):002:0> create 'usertable', 'cf', {SPLITS => (1..n_splits).map {|i| "user#{1000+i*(9999-1000)/n_splits}"}}
+```
+
+Make a note of the column family; in this example it's `cf`.
+
+### 4. Fetch the Proper ALPN Boot Jar
+
+The Bigtable protocol uses HTTP/2, which requires an ALPN protocol negotiation implementation. On JVM instantiation the implementation must be loaded before attempting to connect to the cluster. If you're using Java 7 or 8, use this [Jetty Version Table](http://www.eclipse.org/jetty/documentation/current/alpn-chapter.html#alpn-versions) to determine the version appropriate for your JVM. (ALPN is included in JDK 9+.) Download the proper jar from [Maven](http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22org.mortbay.jetty.alpn%22%20AND%20a%3A%22alpn-boot%22) to a location on your system.
+
+### 5. Download the Bigtable Client Jar
+
+Download one of the `bigtable-hbase-1.#` jars from [Maven](http://search.maven.org/#search%7Cga%7C1%7Ccom.google.cloud.bigtable) to your host.
+
+### 6. Download JSON Credentials
+
+Follow these instructions for [Generating a JSON key](https://cloud.google.com/bigtable/docs/installing-hbase-shell#service-account) and save the key file to your host.
+
+### 7. Create or Edit hbase-site.xml
+
+If you have an existing HBase configuration directory with an `hbase-site.xml` file, edit the file as shown below. If not, create a directory called `conf` under the `hbase10` directory and create a file in it named `hbase-site.xml`. Provide the following settings in the XML file, making sure to replace the bracketed examples with the proper values from your Cloud console.
+
+```
+<configuration>
+  <property>
+    <name>hbase.client.connection.impl</name>
+    <value>com.google.cloud.bigtable.hbase1_0.BigtableConnection</value>
+  </property>
+  <property>
+    <name>google.bigtable.cluster.name</name>
+    <value>[YOUR-CLUSTER-ID]</value>
+  </property>
+  <property>
+    <name>google.bigtable.project.id</name>
+    <value>[YOUR-PROJECT-ID]</value>
+  </property>
+  <property>
+    <name>google.bigtable.zone.name</name>
+    <value>[YOUR-ZONE-NAME]</value>
+  </property>
+  <property>
+    <name>google.bigtable.auth.service.account.enable</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>google.bigtable.auth.json.keyfile</name>
+    <value>[PATH-TO-YOUR-KEY-FILE]</value>
+  </property>
+</configuration>
+```
+
+If you wish to try other API implementations (1.1.x or 1.2.x), change the `hbase.client.connection.impl` value to match the JAR you downloaded, as in the sketch below.
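+
+For example, assuming you downloaded a `bigtable-hbase-1.2` jar (the class name below follows the client's versioned package naming):
+
+```
+<property>
+  <name>hbase.client.connection.impl</name>
+  <value>com.google.cloud.bigtable.hbase1_2.BigtableConnection</value>
+</property>
+```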
+
+If you have an existing HBase config directory, make sure to add it to the class path via `-cp <PATH_TO_BIGTABLE_JAR>:<CONF_DIR>`.
+
+### 8. Execute a Workload
+
+Switch to the root of the YCSB repo, choose the workload you want to run, and `load` it first. On the CLI you must provide the column family, the cluster properties, and the ALPN jar to load.
+
+```
+bin/ycsb load hbase10 -p columnfamily=cf -cp <PATH_TO_BIGTABLE_JAR> -jvm-args='-Xbootclasspath/p:<PATH_TO_ALPN_JAR>' -P workloads/workloada
+```
+
+The `load` step only executes inserts into the datastore. After loading data, run the same workload to mix reads with writes.
+
+```
+bin/ycsb run hbase10 -p columnfamily=cf -jvm-args='-Xbootclasspath/p:<PATH_TO_ALPN_JAR>' -P workloads/workloada
+```
\ No newline at end of file
diff --git a/hbase10/pom.xml b/hbase10/pom.xml
index 3f6bec078debc77aaec15380ed744f326b2d5b30..b3b53673557bf1f712035a2c6fe1b44cbc8c4479 100644
--- a/hbase10/pom.xml
+++ b/hbase10/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent/</relativePath>
   </parent>
 
diff --git a/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java b/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java
index a41c1987325afef791e0cc5b5bfa37f9aa737bef..da72f4f86c6d0101defde14c7548c49ea887f213 100644
--- a/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java
+++ b/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java
@@ -51,6 +51,7 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 import java.util.Vector;
+import java.util.concurrent.atomic.AtomicInteger;
 
 /**
  * HBase 1.0 client for YCSB framework.
@@ -63,14 +64,22 @@ import java.util.Vector;
  */
 public class HBaseClient10 extends com.yahoo.ycsb.DB {
   private Configuration config = HBaseConfiguration.create();
-  
-  // Must be an object for synchronization and tracking running thread counts. 
-  private static Integer threadCount = 0;
+
+  private static AtomicInteger threadCount = new AtomicInteger(0);
 
   private boolean debug = false;
 
   private String tableName = "";
+
+  /**
+   * A cluster Connection instance shared by all running YCSB threads.
+   * It is initialized late so we pick up command-line configs, if any.
+   * To ensure a single instance in a multi-threaded context, guard access
+   * with a lock object.
+   * @see #CONNECTION_LOCK
+   */
   private static Connection connection = null;
+  private static final Object CONNECTION_LOCK = new Object();
 
   // Depending on the value of clientSideBuffering, either bufferedMutator
   // (clientSideBuffering) or currentTable (!clientSideBuffering) will be used.
@@ -121,10 +130,10 @@ public class HBaseClient10 extends com.yahoo.ycsb.DB {
       UserGroupInformation.setConfiguration(config);
     }
 
-    if ((getProperties().getProperty("principal")!=null) 
+    if ((getProperties().getProperty("principal")!=null)
         && (getProperties().getProperty("keytab")!=null)) {
       try {
-        UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"), 
+        UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"),
               getProperties().getProperty("keytab"));
       } catch (IOException e) {
         System.err.println("Keytab file is not readable or not found");
@@ -133,9 +142,10 @@ public class HBaseClient10 extends com.yahoo.ycsb.DB {
     }
 
     try {
-      synchronized(threadCount) {
-        ++threadCount;
+      threadCount.getAndIncrement();
+      synchronized (CONNECTION_LOCK) {
         if (connection == null) {
+          // Initialize if not set up already.
           connection = ConnectionFactory.createConnection(config);
         }
       }
@@ -166,7 +176,9 @@ public class HBaseClient10 extends com.yahoo.ycsb.DB {
     String table = com.yahoo.ycsb.workloads.CoreWorkload.table;
     try {
       final TableName tName = TableName.valueOf(table);
-      connection.getTable(tName).getTableDescriptor();
+      synchronized (CONNECTION_LOCK) {
+        connection.getTable(tName).getTableDescriptor();
+      }
     } catch (IOException e) {
       throw new DBException(e);
     }
@@ -193,11 +205,14 @@ public class HBaseClient10 extends com.yahoo.ycsb.DB {
       long en = System.nanoTime();
       final String type = clientSideBuffering ? "UPDATE" : "CLEANUP";
       measurements.measure(type, (int) ((en - st) / 1000));
-      synchronized(threadCount) {
-        --threadCount;
-        if (threadCount <= 0 && connection != null) {
-          connection.close();
-          connection = null;
+      threadCount.decrementAndGet();
+      if (threadCount.get() <= 0) {
+        // Means we are done so ok to shut down the Connection.
+        synchronized (CONNECTION_LOCK) {
+          if (connection != null) {
+            connection.close();
+            connection = null;
+          }
         }
       }
     } catch (IOException e) {
@@ -207,14 +222,13 @@ public class HBaseClient10 extends com.yahoo.ycsb.DB {
 
   public void getHTable(String table) throws IOException {
     final TableName tName = TableName.valueOf(table);
-    this.currentTable = this.connection.getTable(tName);
-    // suggestions from
-    // http://ryantwopointoh.blogspot.com/2009/01/
-    // performance-of-hbase-importing.html
-    if (clientSideBuffering) {
-      final BufferedMutatorParams p = new BufferedMutatorParams(tName);
-      p.writeBufferSize(writeBufferSize);
-      this.bufferedMutator = this.connection.getBufferedMutator(p);
+    synchronized (CONNECTION_LOCK) {
+      this.currentTable = connection.getTable(tName);
+      if (clientSideBuffering) {
+        final BufferedMutatorParams p = new BufferedMutatorParams(tName);
+        p.writeBufferSize(writeBufferSize);
+        this.bufferedMutator = connection.getBufferedMutator(p);
+      }
     }
   }
 
diff --git a/hypertable/pom.xml b/hypertable/pom.xml
index 6c457f72777deb576ef40cf2dc160206d846b94f..991b660b0359f4aff6d7573c8d4c83f7255e0a30 100644
--- a/hypertable/pom.xml
+++ b/hypertable/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/infinispan/pom.xml b/infinispan/pom.xml
index 943b1937e0b7051508905fdf31ab90aa6c04b3a8..4b3fad03eab04722fc061554c961d762e94fc4b5 100644
--- a/infinispan/pom.xml
+++ b/infinispan/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/jdbc/pom.xml b/jdbc/pom.xml
index 6777033698fdb5393eb47661694c4e1043c4d29c..6ad5bc4038aec0008d58e4ae1bf3d86b913a3517 100644
--- a/jdbc/pom.xml
+++ b/jdbc/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/kudu/README.md b/kudu/README.md
index cd5cffd6387a0d92a8327638a1a1d72c1f70d949..e1f2b286438411b562d9b15d8e4dab2458cf2148 100644
--- a/kudu/README.md
+++ b/kudu/README.md
@@ -42,3 +42,15 @@ Then, you can run the workload:
 ```
 bin/ycsb run kudu -P workloads/workloada
 ```
+
+## Using a previous client version
+
+If you wish to use a different Kudu client version than the one shipped with YCSB, you can specify it on the
+command line with `-Dkudu.version=x`. For example:
+
+```
+mvn -pl com.yahoo.ycsb:kudu-binding -am package -DskipTests -Dkudu.version=0.7.1
+```
+
+Note that prior to 1.0, Kudu doesn't guarantee wire or API compatibility between versions, and only the latest
+version is officially supported.
diff --git a/kudu/pom.xml b/kudu/pom.xml
index f0d3088c06b67e88170ffa31fdf9c8136e0d4d53..ec6d5d653fbe12ec40a84bd4da487e7d1c451d57 100644
--- a/kudu/pom.xml
+++ b/kudu/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
diff --git a/kudu/src/main/java/com/yahoo/ycsb/db/KuduYCSBClient.java b/kudu/src/main/java/com/yahoo/ycsb/db/KuduYCSBClient.java
index 503c574af1f7f7dbcc2ececd8c8bff9ffc291fd1..05757b41f37b06dba7e8b604f3d062470b0c4556 100644
--- a/kudu/src/main/java/com/yahoo/ycsb/db/KuduYCSBClient.java
+++ b/kudu/src/main/java/com/yahoo/ycsb/db/KuduYCSBClient.java
@@ -233,17 +233,20 @@ public class KuduYCSBClient extends com.yahoo.ycsb.DB {
         scannerBuilder.setProjectedColumnNames(querySchema);
       }
 
-      PartialRow lowerBound = schema.newPartialRow();
-      lowerBound.addString(0, startkey);
-      scannerBuilder.lowerBound(lowerBound);
+      KuduPredicate.ComparisonOp comparisonOp;
       if (recordcount == 1) {
-        PartialRow upperBound = schema.newPartialRow();
-        // Keys are fixed length, just adding something at the end is safe.
-        upperBound.addString(0, startkey.concat(" "));
-        scannerBuilder.exclusiveUpperBound(upperBound);
+        comparisonOp = KuduPredicate.ComparisonOp.EQUAL;
+      } else {
+        comparisonOp = KuduPredicate.ComparisonOp.GREATER_EQUAL;
       }
+      KuduPredicate keyPredicate = KuduPredicate.newComparisonPredicate(
+          schema.getColumnByIndex(0),
+          comparisonOp,
+          startkey);
 
-      KuduScanner scanner = scannerBuilder.limit(recordcount) // currently noop
+      KuduScanner scanner = scannerBuilder
+          .addPredicate(keyPredicate)
+          .limit(recordcount) // currently noop
           .build();
 
       while (scanner.hasMoreRows()) {
diff --git a/mapkeeper/pom.xml b/mapkeeper/pom.xml
index 6fac3795c72b15599b2b848c803ea285efe8bc5c..5ac0a611e97486d1ebb469a2cef9cc042b69b7b5 100644
--- a/mapkeeper/pom.xml
+++ b/mapkeeper/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/memcached/pom.xml b/memcached/pom.xml
index 10bcbbe9467d1341d951dd0ab6265702097feccb..9c6843fe6ba0643c3dc01f9e1cbfc8d2d07d887a 100644
--- a/memcached/pom.xml
+++ b/memcached/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
diff --git a/mongodb/pom.xml b/mongodb/pom.xml
index f510d19aa272acceb9737be103c95f8e3f295ef2..8d9dcf184f2eaab4473eebe5050144fa780406b2 100644
--- a/mongodb/pom.xml
+++ b/mongodb/pom.xml
@@ -22,7 +22,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
diff --git a/nosqldb/pom.xml b/nosqldb/pom.xml
index e2823102e4452796ffd6402efbf11aebde97c78c..e11a28958b9a663e12c945550860062d33bae72f 100644
--- a/nosqldb/pom.xml
+++ b/nosqldb/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/orientdb/pom.xml b/orientdb/pom.xml
index db83a942d6955e12374d3b6055f6c7843c0a7e66..2f7e3c8053e5e5832df46ad7396bf943cc0eac80 100644
--- a/orientdb/pom.xml
+++ b/orientdb/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
 
diff --git a/pom.xml b/pom.xml
index 14cd0885fbbb413a5ee0289a4613c9dbfcf10a66..0a6a072a2e673559d4e788bf3681f8183e028fe9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
 
   <groupId>com.yahoo.ycsb</groupId>
   <artifactId>root</artifactId>
-  <version>0.9.0-SNAPSHOT</version>
+  <version>0.10.0-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <name>YCSB Root</name>
@@ -68,6 +68,7 @@ LICENSE file.
   <properties>
     <maven.assembly.version>2.5.5</maven.assembly.version>
     <maven.dependency.version>2.10</maven.dependency.version>
+    <asynchbase.version>1.7.1</asynchbase.version>
     <hbase094.version>0.94.27</hbase094.version>
     <hbase098.version>0.98.14-hadoop2</hbase098.version>
     <hbase10.version>1.0.2</hbase10.version>
@@ -75,9 +76,10 @@ LICENSE file.
     <cassandra.version>1.2.9</cassandra.version>
     <cassandra.cql.version>1.0.3</cassandra.cql.version>
     <cassandra2.cql.version>3.0.0</cassandra2.cql.version>
-    <geode.version>1.0.0-incubating.M1</geode.version>
+    <geode.version>1.0.0-incubating.M2</geode.version>
+    <googlebigtable.version>0.2.3</googlebigtable.version>
     <infinispan.version>7.2.2.Final</infinispan.version>
-    <kudu.version>0.6.0</kudu.version>
+    <kudu.version>0.8.0</kudu.version>
     <openjpa.jdbc.version>2.1.1</openjpa.jdbc.version>
     <!--<mapkeeper.version>1.0</mapkeeper.version>-->
     <mongodb.version>3.0.3</mongodb.version>
@@ -90,7 +92,9 @@ LICENSE file.
     <thrift.version>0.8.0</thrift.version>
     <hypertable.version>0.9.5.6</hypertable.version>
     <couchbase.version>1.4.10</couchbase.version>
+    <couchbase2.version>2.2.6</couchbase2.version>
     <tarantool.version>1.6.5</tarantool.version>
+    <riak.version>2.0.5</riak.version>
     <aerospike.version>3.1.2</aerospike.version>
     <solr.version>5.4.0</solr.version>
   </properties>
@@ -102,13 +106,16 @@ LICENSE file.
     <!-- all the datastore bindings, lex sorted please -->
     <module>accumulo</module>
     <module>aerospike</module>
+    <module>asynchbase</module>
     <module>cassandra</module>
     <module>cassandra2</module>
     <module>couchbase</module>
+    <module>couchbase2</module>
     <module>distribution</module>
     <module>dynamodb</module>
     <module>elasticsearch</module>
     <module>geode</module>
+    <module>googlebigtable</module>
     <module>googledatastore</module>
     <module>hbase094</module>
     <module>hbase098</module>
@@ -124,6 +131,7 @@ LICENSE file.
     <module>orientdb</module>
     <module>rados</module>
     <module>redis</module>
+    <module>riak</module>
     <module>s3</module>
     <module>solr</module>
     <module>tarantool</module>
diff --git a/redis/pom.xml b/redis/pom.xml
index 65dbc50bf928e8e41333a48472d2f43f530f2fd5..f05d5c092fef646fbc4222a4aa23dcd95fc695ec 100644
--- a/redis/pom.xml
+++ b/redis/pom.xml
@@ -21,7 +21,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
   </parent>
   
diff --git a/riak/README.md b/riak/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..58bd6d48807793519f84cd5862ea5e730e172adb
--- /dev/null
+++ b/riak/README.md
@@ -0,0 +1,92 @@
+<!--
+Copyright (c) 2016 YCSB contributors. All rights reserved.
+Copyright 2014 Basho Technologies, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file except in compliance with the License. You
+may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License. See accompanying
+LICENSE file.
+-->
+
+Riak KV Client for Yahoo! Cloud System Benchmark (YCSB)
+=======================================================
+
+The Riak KV YCSB client is designed to work with the Yahoo! Cloud System Benchmark (YCSB) project (https://github.com/brianfrankcooper/YCSB) to support performance testing for the 2.x.y line of the Riak KV database.
+
+Creating a <i>bucket-type</i> to use with YCSB
+----------------------------
+
+Perform the following operations on your Riak cluster to configure it for the benchmarks.
+
+Set the default backend for Riak to <i>LevelDB</i> in the `riak.conf` file of every node of your cluster. This is required to support <i>secondary indexes</i>, which are used for the `scan` transactions. You can do this by modifying the proper line as shown below.
+
+```
+storage_backend = leveldb
+```
+After this, create a bucket type named "ycsb"<sup id="a1">[1](#f1)</sup> by logging into one of the nodes in your cluster. Now you're ready to set up the cluster to operate with either the strong or the eventual consistency model, as shown in the next two subsections.
+
+### Strong consistency model
+
+To use the <i>strong consistency model</i> (default), you need to follow the next two steps.
+
+1. In every `riak.conf` file, search for the `##strong_consistency=on` line and uncomment it. It's important that you do this <b>before you start your cluster</b>!
+2. Run the following `riak-admin` commands:
+
+  ```
+  riak-admin bucket-type create ycsb '{"props":{"consistent":true}}'
+  riak-admin bucket-type activate ycsb
+  ```
+
+When using this model, you **may want to specify the number of replicas to create for each object**<sup id="a2">[2](#f2)</sup>: the *R* and *W* parameters (see next section) will in fact be ignored. The only information needed by this consistency model is how many nodes the system has to successfully query to consider a transaction completed. To set this parameter, you can add `"n_val":N` to the list of properties shown above (by default `N` is set to 3).
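+
+For example (illustrative), to keep five replicas per object you would create the bucket-type as:
+
+```
+riak-admin bucket-type create ycsb '{"props":{"consistent":true,"n_val":5}}'
+riak-admin bucket-type activate ycsb
+```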
+
+#### A note on the scan transactions
+Currently, `scan` transactions are not _directly_ supported, as there is no suitable means to perform them properly. This will not cause the benchmark to fail; it simply won't perform any scan transactions at all (these will immediately return with a `Status.NOT_IMPLEMENTED` code).
+
+However, a possible workaround is provided: since Riak doesn't allow strong-consistent bucket-types to use secondary indexes, we can create an eventually consistent one just to store (*key*, *2i indexes*) pairs. This bucket is used only to obtain the keys where the objects are located, which are then used to retrieve the actual objects from the strong-consistent bucket. If you want to use this workaround, you have to create and activate a "_fake bucket-type_" using the following commands:
+```
+riak-admin bucket-type create fakeBucketType '{"props":{"allow_mult":"false","n_val":1,"dvv_enabled":false,"last_write_wins":true}}'
+riak-admin bucket-type activate fakeBucketType
+```
+A bucket-type so defined isn't allowed to _create siblings_ (`"allow_mult":"false"`); it has just _one replica_ (`"n_val":1`), which stores the _last value provided_ (`"last_write_wins":true`), and _vector clocks_ are used instead of _dotted version vectors_ (`"dvv_enabled":false`). Note that setting `"n_val":1` means the `scan` transactions won't be particularly *fault-tolerant*: if a node fails, many of them could fail as well. You may increase this value, but doing so will load the cluster with more work, so the choice is yours to make!
+Then set the `riak.strong_consistent_scans_bucket_type` property (see next section) equal to the name you gave the aforementioned "fake bucket-type" (e.g. `fakeBucketType` in this case).
+
+Please note that this workaround involves a **double store operation for each insert transaction**: one to store the actual object and another to save the corresponding 2i index. In practice, the client won't notice any difference, as the latter operation is performed asynchronously. The cluster, however, will obviously carry more load, which is why the proposed "fake bucket-type" is made as little _resource-demanding_ as possible.
+
+### Eventual consistency model
+
+If you want to use the <i>eventual consistency model</i> implemented in Riak, you just have to type:
+```
+riak-admin bucket-type create ycsb '{"props":{"allow_mult":"false"}}'
+riak-admin bucket-type activate ycsb
+```
+
+Riak KV configuration parameters
+----------------------------
+You can specify these configuration parameters either on the command line or in the `riak.properties` file; an example invocation follows the list.
+
+* `riak.hosts` - <b>string list</b>, comma separated list of IPs or FQDNs. For example: `riak.hosts=127.0.0.1,127.0.0.2,127.0.0.3` or `riak.hosts=riak1.mydomain.com,riak2.mydomain.com,riak3.mydomain.com`.
+* `riak.port` - <b>int</b>, the port on which every node is listening. It must match the one specified in the `riak.conf` file at the line `listener.protobuf.internal`.
+* `riak.bucket_type` - <b>string</b>, it must match the name of the bucket type created during setup (see section above).
+* `riak.r_val` - <b>int</b>, this value represents the number of Riak nodes that must return results for a read operation before the transaction is considered successfully completed. 
+* `riak.w_val` - <b>int</b>, this value represents the number of Riak nodes that must report success before an insert/update transaction is considered complete.
+* `riak.read_retry_count` - <b>int</b>, the number of times the client will try to read a key from Riak.
+* `riak.wait_time_before_retry` - <b>int</b>, the time (in milliseconds) before the client attempts to perform another read if the previous one failed.
+* `riak.transaction_time_limit` - <b>int</b>, the time (in seconds) the client waits before aborting the current transaction.
+* `riak.strong_consistency` - <b>boolean</b>, indicates whether to use *strong consistency* (true) or *eventual consistency* (false).
+* `riak.strong_consistent_scans_bucket_type` - **string**, indicates the bucket-type to use to allow scan transactions when using the strong consistency mode.
+* `riak.debug` - <b>boolean</b>, enables debug mode. This displays all the properties (specified or defaults) when a benchmark is started. Moreover, it shows error causes whenever these occur.
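+
+For example, a minimal invocation overriding a few of these properties on the command line might be (illustrative values; 8087 is Riak's default protobuf port):
+
+```
+bin/ycsb load riak -P workloads/workloada -p riak.hosts=127.0.0.1 -p riak.port=8087 -p riak.bucket_type=ycsb
+```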
+
+<b>Note</b>: For more information on workloads and how to run them please see: https://github.com/brianfrankcooper/YCSB/wiki/Running-a-Workload
+
+<b id="f1">1</b> As specified in the `riak.properties` file.  See parameters configuration section for further info. [↩](#a1)
+
+<b id="f2">2</b> More info about properly setting up a fault-tolerant cluster can be found at http://docs.basho.com/riak/kv/2.1.4/configuring/strong-consistency/#enabling-strong-consistency.[↩](#a2)
+
diff --git a/riak/pom.xml b/riak/pom.xml
new file mode 100644
index 0000000000000000000000000000000000000000..4cb303c00f7a11ecb9f46e2df1e2859b7c41eb27
--- /dev/null
+++ b/riak/pom.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2016 YCSB contributors. All rights reserved.
+Copyright 2014 Basho Technologies, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file except in compliance with the License. You
+may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License. See accompanying
+LICENSE file.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>com.yahoo.ycsb</groupId>
+    <artifactId>binding-parent</artifactId>
+    <version>0.10.0-SNAPSHOT</version>
+    <relativePath>../binding-parent</relativePath>
+  </parent>
+
+  <artifactId>riak-binding</artifactId>
+  <name>Riak KV Binding</name>
+  <packaging>jar</packaging>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.basho.riak</groupId>
+      <artifactId>riak-client</artifactId>
+      <version>2.0.5</version>
+    </dependency>
+    <dependency>
+      <groupId>com.yahoo.ycsb</groupId>
+      <artifactId>core</artifactId>
+      <version>${project.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.collections</groupId>
+      <artifactId>google-collections</artifactId>
+      <version>1.0</version>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>4.12</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+</project>
\ No newline at end of file
diff --git a/riak/src/main/java/com/yahoo/ycsb/db/riak/RiakKVClient.java b/riak/src/main/java/com/yahoo/ycsb/db/riak/RiakKVClient.java
new file mode 100644
index 0000000000000000000000000000000000000000..42c3e90e4056275cfd6e705132fbff6f9b8bd62b
--- /dev/null
+++ b/riak/src/main/java/com/yahoo/ycsb/db/riak/RiakKVClient.java
@@ -0,0 +1,594 @@
+/**
+ * Copyright (c) 2016 YCSB contributors All rights reserved.
+ * Copyright 2014 Basho Technologies, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+
+package com.yahoo.ycsb.db.riak;
+
+import com.basho.riak.client.api.commands.buckets.StoreBucketProperties;
+import com.basho.riak.client.api.commands.kv.StoreValue;
+import com.basho.riak.client.api.commands.kv.UpdateValue;
+import com.basho.riak.client.core.RiakFuture;
+import com.basho.riak.client.core.query.RiakObject;
+import com.basho.riak.client.core.query.indexes.LongIntIndex;
+import com.basho.riak.client.core.util.BinaryValue;
+import com.yahoo.ycsb.*;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import com.basho.riak.client.api.RiakClient;
+import com.basho.riak.client.api.cap.Quorum;
+import com.basho.riak.client.api.commands.indexes.IntIndexQuery;
+import com.basho.riak.client.api.commands.kv.DeleteValue;
+import com.basho.riak.client.api.commands.kv.FetchValue;
+import com.basho.riak.client.core.RiakCluster;
+import com.basho.riak.client.core.RiakNode;
+import com.basho.riak.client.core.query.Location;
+import com.basho.riak.client.core.query.Namespace;
+
+import static com.yahoo.ycsb.db.riak.RiakUtils.createResultHashMap;
+import static com.yahoo.ycsb.db.riak.RiakUtils.getKeyAsLong;
+import static com.yahoo.ycsb.db.riak.RiakUtils.serializeTable;
+
+/**
+ * Riak KV 2.x.y client for YCSB framework.
+ *
+ */
+public class RiakKVClient extends DB {
+  private static final String HOST_PROPERTY = "riak.hosts";
+  private static final String PORT_PROPERTY = "riak.port";
+  private static final String BUCKET_TYPE_PROPERTY = "riak.bucket_type";
+  private static final String R_VALUE_PROPERTY = "riak.r_val";
+  private static final String W_VALUE_PROPERTY = "riak.w_val";
+  private static final String READ_RETRY_COUNT_PROPERTY = "riak.read_retry_count";
+  private static final String WAIT_TIME_BEFORE_RETRY_PROPERTY = "riak.wait_time_before_retry";
+  private static final String TRANSACTION_TIME_LIMIT_PROPERTY = "riak.transaction_time_limit";
+  private static final String STRONG_CONSISTENCY_PROPERTY = "riak.strong_consistency";
+  private static final String STRONG_CONSISTENT_SCANS_BUCKET_TYPE_PROPERTY = "riak.strong_consistent_scans_bucket_type";
+  private static final String DEBUG_PROPERTY = "riak.debug";
+
+  private static final Status TIME_OUT = new Status("TIME_OUT", "Cluster didn't respond after maximum wait time.");
+
+  private String[] hosts;
+  private int port;
+  private String bucketType;
+  private String bucketType2i;
+  private Quorum rvalue;
+  private Quorum wvalue;
+  private int readRetryCount;
+  private int waitTimeBeforeRetry;
+  private int transactionTimeLimit;
+  private boolean strongConsistency;
+  private String strongConsistentScansBucketType;
+  private boolean performStrongConsistentScans;
+  private boolean debug;
+
+  private RiakClient riakClient;
+  private RiakCluster riakCluster;
+
+  private void loadDefaultProperties() {
+    InputStream propFile = RiakKVClient.class.getClassLoader().getResourceAsStream("riak.properties");
+    Properties propsPF = new Properties(System.getProperties());
+
+    try {
+      propsPF.load(propFile);
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+
+    hosts = propsPF.getProperty(HOST_PROPERTY).split(",");
+    port = Integer.parseInt(propsPF.getProperty(PORT_PROPERTY));
+    bucketType = propsPF.getProperty(BUCKET_TYPE_PROPERTY);
+    rvalue = new Quorum(Integer.parseInt(propsPF.getProperty(R_VALUE_PROPERTY)));
+    wvalue = new Quorum(Integer.parseInt(propsPF.getProperty(W_VALUE_PROPERTY)));
+    readRetryCount = Integer.parseInt(propsPF.getProperty(READ_RETRY_COUNT_PROPERTY));
+    waitTimeBeforeRetry = Integer.parseInt(propsPF.getProperty(WAIT_TIME_BEFORE_RETRY_PROPERTY));
+    transactionTimeLimit = Integer.parseInt(propsPF.getProperty(TRANSACTION_TIME_LIMIT_PROPERTY));
+    strongConsistency = Boolean.parseBoolean(propsPF.getProperty(STRONG_CONSISTENCY_PROPERTY));
+    strongConsistentScansBucketType = propsPF.getProperty(STRONG_CONSISTENT_SCANS_BUCKET_TYPE_PROPERTY);
+    debug = Boolean.parseBoolean(propsPF.getProperty(DEBUG_PROPERTY));
+  }
+
+  private void loadProperties() {
+    // First, load the default properties...
+    loadDefaultProperties();
+
+    // ...then, check for some props set at command line!
+    Properties props = getProperties();
+
+    String portString = props.getProperty(PORT_PROPERTY);
+    if (portString != null) {
+      port = Integer.parseInt(portString);
+    }
+
+    String hostsString = props.getProperty(HOST_PROPERTY);
+    if (hostsString != null) {
+      hosts = hostsString.split(",");
+    }
+
+    String bucketTypeString = props.getProperty(BUCKET_TYPE_PROPERTY);
+    if (bucketTypeString != null) {
+      bucketType = bucketTypeString;
+    }
+
+    String rValueString = props.getProperty(R_VALUE_PROPERTY);
+    if (rValueString != null) {
+      rvalue = new Quorum(Integer.parseInt(rValueString));
+    }
+
+    String wValueString = props.getProperty(W_VALUE_PROPERTY);
+    if (wValueString != null) {
+      wvalue = new Quorum(Integer.parseInt(wValueString));
+    }
+
+    String readRetryCountString = props.getProperty(READ_RETRY_COUNT_PROPERTY);
+    if (readRetryCountString != null) {
+      readRetryCount = Integer.parseInt(readRetryCountString);
+    }
+
+    String waitTimeBeforeRetryString = props.getProperty(WAIT_TIME_BEFORE_RETRY_PROPERTY);
+    if (waitTimeBeforeRetryString != null) {
+      waitTimeBeforeRetry = Integer.parseInt(waitTimeBeforeRetryString);
+    }
+
+    String transactionTimeLimitString = props.getProperty(TRANSACTION_TIME_LIMIT_PROPERTY);
+    if (transactionTimeLimitString != null) {
+      transactionTimeLimit = Integer.parseInt(transactionTimeLimitString);
+    }
+
+    String strongConsistencyString = props.getProperty(STRONG_CONSISTENCY_PROPERTY);
+    if (strongConsistencyString != null) {
+      strongConsistency = Boolean.parseBoolean(strongConsistencyString);
+    }
+
+    String strongConsistentScansBucketTypeString = props.getProperty(STRONG_CONSISTENT_SCANS_BUCKET_TYPE_PROPERTY);
+    if (strongConsistentScansBucketTypeString != null) {
+      strongConsistentScansBucketType = strongConsistentScansBucketTypeString;
+    }
+
+    String debugString = props.getProperty(DEBUG_PROPERTY);
+    if (debugString != null) {
+      debug = Boolean.parseBoolean(debugString);
+    }
+  }
+
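+  /**
+   * Initializes the Riak KV client: loads the properties, starts the connection to the cluster and chooses the
+   * bucket-type in which the 2i indexes will be stored.
+   *
+   * @throws DBException Thrown if the cluster fails to start properly.
+   */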
+  public void init() throws DBException {
+    loadProperties();
+
+    RiakNode.Builder builder = new RiakNode.Builder().withRemotePort(port);
+    List<RiakNode> nodes = RiakNode.Builder.buildNodes(builder, Arrays.asList(hosts));
+    riakCluster = new RiakCluster.Builder(nodes).build();
+
+    try {
+      riakCluster.start();
+      riakClient = new RiakClient(riakCluster);
+    } catch (Exception e) {
+      System.err.println("Unable to properly start up the cluster. Reason: " + e.toString());
+      throw new DBException(e);
+    }
+
+    // If strong consistency is in use, we need to change the bucket-type where the 2i indexes will be stored.
+    if (strongConsistency && !strongConsistentScansBucketType.isEmpty()) {
+      // The 2i indexes have to be stored in the dedicated strongConsistentScansBucketType: this however has
+      // to be done only if the user actually created it! So, if the latter doesn't exist, then the scan transactions
+      // will not be performed at all.
+      bucketType2i = strongConsistentScansBucketType;
+      performStrongConsistentScans = true;
+    } else {
+      // If instead eventual consistency is in use, then the 2i indexes have to be stored in the bucket-type
+      // indicated with the bucketType variable.
+      bucketType2i = bucketType;
+      performStrongConsistentScans = false;
+    }
+
+    if (debug) {
+      System.err.println("DEBUG ENABLED. Configuration parameters:");
+      System.err.println("-----------------------------------------");
+      System.err.println("Hosts: " + Arrays.toString(hosts));
+      System.err.println("Port: " + port);
+      System.err.println("Bucket Type: " + bucketType);
+      System.err.println("R Val: " + rvalue.toString());
+      System.err.println("W Val: " + wvalue.toString());
+      System.err.println("Read Retry Count: " + readRetryCount);
+      System.err.println("Wait Time Before Retry: " + waitTimeBeforeRetry + " ms");
+      System.err.println("Transaction Time Limit: " + transactionTimeLimit + " s");
+      System.err.println("Consistency model: " + (strongConsistency ? "Strong" : "Eventual"));
+
+      if (strongConsistency) {
+        System.err.println("Strong Consistent Scan Transactions " +  (performStrongConsistentScans ? "" : "NOT ") +
+            "allowed.");
+      }
+    }
+  }
+
+  /**
+   * Read a record from the database. Each field/value pair from the result will be stored in a HashMap.
+   *
+   * @param table  The name of the table (Riak bucket)
+   * @param key    The record key of the record to read.
+   * @param fields The list of fields to read, or null for all of them
+   * @param result A HashMap of field/value pairs for the result
+   * @return Zero on success, a non-zero error code on error
+   */
+  @Override
+  public Status read(String table, String key, Set<String> fields, HashMap<String, ByteIterator> result) {
+    Location location = new Location(new Namespace(bucketType, table), key);
+    FetchValue fv = new FetchValue.Builder(location).withOption(FetchValue.Option.R, rvalue).build();
+    FetchValue.Response response;
+
+    try {
+      response = fetch(fv);
+
+      if (response.isNotFound()) {
+        if (debug) {
+          System.err.println("Unable to read key " + key + ". Reason: NOT FOUND");
+        }
+
+        return Status.NOT_FOUND;
+      }
+    } catch (TimeoutException e) {
+      if (debug) {
+        System.err.println("Unable to read key " + key + ". Reason: TIME OUT");
+      }
+
+      return TIME_OUT;
+    } catch (Exception e) {
+      if (debug) {
+        System.err.println("Unable to read key " + key + ". Reason: " + e.toString());
+      }
+
+      return Status.ERROR;
+    }
+
+    // Create the result HashMap.
+    createResultHashMap(fields, response, result);
+
+    return Status.OK;
+  }
+
+  /**
+   * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored in
+   * a HashMap.
+   * Note: The scan operation requires the use of secondary indexes (2i) and LevelDB.
+   *
+   * @param table       The name of the table (Riak bucket)
+   * @param startkey    The record key of the first record to read.
+   * @param recordcount The number of records to read
+   * @param fields      The list of fields to read, or null for all of them
+   * @param result      A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
+   * @return Zero on success, a non-zero error code on error
+   */
+  @Override
+  public Status scan(String table, String startkey, int recordcount, Set<String> fields,
+                     Vector<HashMap<String, ByteIterator>> result) {
+    if (strongConsistency && !performStrongConsistentScans) {
+      return Status.NOT_IMPLEMENTED;
+    }
+
+    // The strongly consistent bucket-type is not capable of storing 2i indexes, so we need to read them from the
+    // fake one (which we use only to store indexes). This is why, when using such a consistency model, the
+    // bucketType2i variable is set to the strongConsistentScansBucketType.
+    IntIndexQuery iiq = new IntIndexQuery
+        .Builder(new Namespace(bucketType2i, table), "key", getKeyAsLong(startkey), Long.MAX_VALUE)
+        .withMaxResults(recordcount)
+        .withPaginationSort(true)
+        .build();
+
+    Location location;
+    RiakFuture<IntIndexQuery.Response, IntIndexQuery> future = riakClient.executeAsync(iiq);
+
+    try {
+      IntIndexQuery.Response response = future.get(transactionTimeLimit, TimeUnit.SECONDS);
+      List<IntIndexQuery.Response.Entry> entries = response.getEntries();
+
+      // If no entries were retrieved, then something bad happened...
+      if (entries.size() == 0) {
+        if (debug) {
+          System.err.println("Unable to scan any record starting from key " + startkey + ", aborting transaction. " +
+              "Reason: NOT FOUND");
+        }
+
+        return Status.NOT_FOUND;
+      }
+
+      for (IntIndexQuery.Response.Entry entry : entries) {
+        // If strong consistency is in use, then the actual location of the object we want to read is obtained by
+        // fetching the key from the one retrieved with the 2i indexes search operation.
+        if (strongConsistency) {
+          location = new Location(new Namespace(bucketType, table), entry.getRiakObjectLocation().getKeyAsString());
+        } else {
+          location = entry.getRiakObjectLocation();
+        }
+
+        FetchValue fv = new FetchValue.Builder(location)
+            .withOption(FetchValue.Option.R, rvalue)
+            .build();
+
+        FetchValue.Response keyResponse = fetch(fv);
+
+        if (keyResponse.isNotFound()) {
+          if (debug) {
+            System.err.println("Unable to scan all requested records starting from key " + startkey + ", aborting " +
+                "transaction. Reason: NOT FOUND");
+          }
+
+          return Status.NOT_FOUND;
+        }
+
+        // Create the partial result to add to the result vector.
+        HashMap<String, ByteIterator> partialResult = new HashMap<>();
+        createResultHashMap(fields, keyResponse, partialResult);
+        result.add(partialResult);
+      }
+    } catch (TimeoutException e) {
+      if (debug) {
+        System.err.println("Unable to scan all requested records starting from key " + startkey + ", aborting " +
+            "transaction. Reason: TIME OUT");
+      }
+
+      return TIME_OUT;
+    } catch (Exception e) {
+      if (debug) {
+        System.err.println("Unable to scan all records starting from key " + startkey + ", aborting transaction. " +
+            "Reason: " + e.toString());
+      }
+
+      return Status.ERROR;
+    }
+
+    return Status.OK;
+  }
+
+  /**
+   * Tries to perform a read and, whenever it fails, retries it. It tries as many times as indicated, even if
+   * riakClient.executeAsync(fv) throws an exception. This is needed for those situations in which the cluster is
+   * unable to respond properly due to overload. Note however that if the cluster doesn't respond within
+   * transactionTimeLimit, the transaction is discarded immediately.
+   *
+   * @param fv The value to fetch from the cluster.
+   * @return The response of the last fetch attempt, or null if every attempt failed with a non-timeout error.
+   * @throws TimeoutException Thrown if the cluster doesn't respond within transactionTimeLimit.
+   */
+  private FetchValue.Response fetch(FetchValue fv) throws TimeoutException {
+    FetchValue.Response response = null;
+
+    for (int i = 0; i < readRetryCount; i++) {
+      RiakFuture<FetchValue.Response, Location> future = riakClient.executeAsync(fv);
+
+      try {
+        response = future.get(transactionTimeLimit, TimeUnit.SECONDS);
+
+        if (!response.isNotFound()) {
+          break;
+        }
+      } catch (TimeoutException e) {
+        // Let the caller decide how to handle this exception...
+        throw e;
+      } catch (Exception e) {
+        // Sleep for a few ms before retrying...
+        try {
+          Thread.sleep(waitTimeBeforeRetry);
+        } catch (InterruptedException e1) {
+          e1.printStackTrace();
+        }
+      }
+    }
+
+    return response;
+  }
+
+  /**
+   * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the
+   * record with the specified record key. Also creates a secondary index (2i) for each record consisting of the key
+   * converted to long to be used for the scan operation.
+   *
+   * @param table  The name of the table (Riak bucket)
+   * @param key    The record key of the record to insert.
+   * @param values A HashMap of field/value pairs to insert in the record
+   * @return Zero on success, a non-zero error code on error
+   */
+  @Override
+  public Status insert(String table, String key, HashMap<String, ByteIterator> values) {
+    Location location = new Location(new Namespace(bucketType, table), key);
+    RiakObject object = new RiakObject();
+
+    // Strong consistency doesn't support secondary indexing, but the eventually consistent model does. So, we can
+    // mock 2i usage by creating a fake object, stored in an eventually consistent bucket-type, with the SAME KEY
+    // THE ACTUAL OBJECT HAS. The latter is obviously stored in the strongly consistent bucket-type indicated with
+    // the riak.bucket_type property.
+    if (strongConsistency && performStrongConsistentScans) {
+      // Create a fake object to store in the default bucket-type just to keep track of the 2i indices.
+      Location fakeLocation = new Location(new Namespace(strongConsistentScansBucketType, table), key);
+
+      // Obviously, we want the fake object to contain as little data as possible. We can't create an empty object,
+      // so we have to choose the minimum data size allowed: one byte.
+      RiakObject fakeObject = new RiakObject();
+      fakeObject.setValue(BinaryValue.create(new byte[]{0x00}));
+      fakeObject.getIndexes().getIndex(LongIntIndex.named("key_int")).add(getKeyAsLong(key));
+
+      StoreValue fakeStore = new StoreValue.Builder(fakeObject)
+          .withLocation(fakeLocation)
+          .build();
+
+      // We don't wait for the operation to complete, because doing so would slow down the client and make the
+      // solution too heavy to be a valid compromise. This obviously means that under heavy load conditions a scan
+      // operation could fail due to an unfinished "fakeStore".
+      riakClient.executeAsync(fakeStore);
+    } else if (!strongConsistency) {
+      // The next operation is useless when using the strong consistency model, so it is performed only when using
+      // eventual consistency.
+      object.getIndexes().getIndex(LongIntIndex.named("key_int")).add(getKeyAsLong(key));
+    }
+
+    // Store proper values into the object.
+    object.setValue(BinaryValue.create(serializeTable(values)));
+
+    StoreValue store = new StoreValue.Builder(object)
+        .withOption(StoreValue.Option.W, wvalue)
+        .withLocation(location)
+        .build();
+
+    RiakFuture<StoreValue.Response, Location> future = riakClient.executeAsync(store);
+
+    try {
+      future.get(transactionTimeLimit, TimeUnit.SECONDS);
+    } catch (TimeoutException e) {
+      if (debug) {
+        System.err.println("Unable to insert key " + key + ". Reason: TIME OUT");
+      }
+
+      return TIME_OUT;
+    } catch (Exception e) {
+      if (debug) {
+        System.err.println("Unable to insert key " + key + ". Reason: " + e.toString());
+      }
+
+      return Status.ERROR;
+    }
+
+    return Status.OK;
+  }
+
+  /**
+   * Auxiliary class needed for object substitution within the update operation. It is a fundamental part of the
+   * fetch-update (locally)-store cycle described by Basho to properly perform a strong-consistent update.
+   */
+  private static final class UpdateEntity extends UpdateValue.Update<RiakObject> {
+    private final RiakObject object;
+
+    private UpdateEntity(RiakObject object) {
+      this.object = object;
+    }
+
+    // Simply returns the new object, replacing the original.
+    @Override
+    public RiakObject apply(RiakObject original) {
+      return object;
+    }
+  }
+
+  /**
+   * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the
+   * record with the specified record key, overwriting any existing values with the same field name.
+   *
+   * @param table  The name of the table (Riak bucket)
+   * @param key    The record key of the record to write.
+   * @param values A HashMap of field/value pairs to update in the record
+   * @return Zero on success, a non-zero error code on error
+   */
+  @Override
+  public Status update(String table, String key, HashMap<String, ByteIterator> values) {
+    // If the eventual consistency model is in use, then an update operation is practically equivalent to an insert.
+    if (!strongConsistency) {
+      return insert(table, key, values);
+    }
+
+    Location location = new Location(new Namespace(bucketType, table), key);
+
+    UpdateValue update = new UpdateValue.Builder(location)
+        .withUpdate(new UpdateEntity(new RiakObject().setValue(BinaryValue.create(serializeTable(values)))))
+        .build();
+
+    RiakFuture<UpdateValue.Response, Location> future = riakClient.executeAsync(update);
+
+    try {
+      // For some reason, the update transaction doesn't throw any exception when no cluster has been started, so one
+      // needs to check whether it was done or not. When calling the wasUpdated() function with no nodes available, a
+      // NullPointerException is thrown.
+      // Moreover, such an exception could be thrown when multiple threads are trying to update the same key or, more
+      // generally, when the system is being queried by many clients (i.e. overloaded). This is a known limitation of
+      // Riak KV's strong consistency implementation.
+      future.get(transactionTimeLimit, TimeUnit.SECONDS).wasUpdated();
+    } catch (TimeoutException e) {
+      if (debug) {
+        System.err.println("Unable to update key " + key + ". Reason: TIME OUT");
+      }
+
+      return TIME_OUT;
+    } catch (Exception e) {
+      if (debug) {
+        System.err.println("Unable to update key " + key + ". Reason: " + e.toString());
+      }
+
+      return Status.ERROR;
+    }
+
+    return Status.OK;
+  }
+
+  /**
+   * Delete a record from the database.
+   *
+   * @param table The name of the table (Riak bucket)
+   * @param key   The record key of the record to delete.
+   * @return Zero on success, a non-zero error code on error
+   */
+  @Override
+  public Status delete(String table, String key) {
+    Location location = new Location(new Namespace(bucketType, table), key);
+    DeleteValue dv = new DeleteValue.Builder(location).build();
+
+    RiakFuture<Void, Location> future = riakClient.executeAsync(dv);
+
+    try {
+      future.get(transactionTimeLimit, TimeUnit.SECONDS);
+    } catch (TimeoutException e) {
+      if (debug) {
+        System.err.println("Unable to delete key " + key + ". Reason: TIME OUT");
+      }
+
+      return TIME_OUT;
+    } catch (Exception e) {
+      if (debug) {
+        System.err.println("Unable to delete key " + key + ". Reason: " + e.toString());
+      }
+
+      return Status.ERROR;
+    }
+
+    return Status.OK;
+  }
+
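+  /**
+   * Shuts down the connection to the Riak cluster.
+   *
+   * @throws DBException Thrown if the cluster fails to shut down properly.
+   */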
+  public void cleanup() throws DBException {
+    try {
+      riakCluster.shutdown();
+    } catch (Exception e) {
+      System.err.println("Unable to properly shutdown the cluster. Reason: " + e.toString());
+      throw new DBException(e);
+    }
+  }
+
+  /**
+   * Auxiliary function needed for testing. It configures the default bucket-type to take care of the consistency
+   * problem by disallowing sibling creation. Moreover, it disables strong consistency, because there is no
+   * possibility to create a proper bucket-type to fake 2i index usage.
+   *
+   * @param bucket     The bucket name.
+   * @throws Exception Thrown if something bad happens.
+   */
+  void setTestEnvironment(String bucket) throws Exception {
+    bucketType = "default";
+    bucketType2i = bucketType;
+    strongConsistency = false;
+
+    Namespace ns = new Namespace(bucketType, bucket);
+    StoreBucketProperties newBucketProperties = new StoreBucketProperties.Builder(ns).withAllowMulti(false).build();
+
+    riakClient.execute(newBucketProperties);
+  }
+}
diff --git a/riak/src/main/java/com/yahoo/ycsb/db/riak/RiakUtils.java b/riak/src/main/java/com/yahoo/ycsb/db/riak/RiakUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..59090fa14b8e670303a93adaa38afa680a9c3062
--- /dev/null
+++ b/riak/src/main/java/com/yahoo/ycsb/db/riak/RiakUtils.java
@@ -0,0 +1,188 @@
+/**
+ * Copyright (c) 2016 YCSB contributors All rights reserved.
+ * Copyright 2014 Basho Technologies, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+
+package com.yahoo.ycsb.db.riak;
+
+import java.io.*;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import com.basho.riak.client.api.commands.kv.FetchValue;
+import com.yahoo.ycsb.ByteArrayByteIterator;
+import com.yahoo.ycsb.ByteIterator;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+/**
+ * Utility class for Riak KV Client.
+ *
+ */
+final class RiakUtils {
+
+  private RiakUtils() {
+    super();
+  }
+
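+  // Encodes an int as 4 bytes in big-endian order; used as the length header in the serialized table format.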
+  private static byte[] toBytes(final int anInteger) {
+    byte[] aResult = new byte[4];
+
+    aResult[0] = (byte) (anInteger >> 24);
+    aResult[1] = (byte) (anInteger >> 16);
+    aResult[2] = (byte) (anInteger >> 8);
+    aResult[3] = (byte) (anInteger /* >> 0 */);
+
+    return aResult;
+  }
+
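+  // Decodes a 4-byte big-endian array back into an int; the inverse of toBytes().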
+  private static int fromBytes(final byte[] aByteArray) {
+    checkArgument(aByteArray.length == 4);
+
+    return (aByteArray[0] << 24) | (aByteArray[1] & 0xFF) << 16 | (aByteArray[2] & 0xFF) << 8 | (aByteArray[3] & 0xFF);
+  }
+
+  private static void close(final OutputStream anOutputStream) {
+    try {
+      anOutputStream.close();
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+  }
+
+  private static void close(final InputStream anInputStream) {
+    try {
+      anInputStream.close();
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+  }
+
+  /**
+   * Serializes a Map, transforming the contained list of (String, ByteIterator) couples into a byte array. Each
+   * column name and value is written as a 4-byte length header followed by the corresponding bytes.
+   *
+   * @param aTable A Map to serialize.
+   * @return A byte array containing the serialized table.
+   */
+  static byte[] serializeTable(Map<String, ByteIterator> aTable) {
+    final ByteArrayOutputStream anOutputStream = new ByteArrayOutputStream();
+    final Set<Map.Entry<String, ByteIterator>> theEntries = aTable.entrySet();
+
+    try {
+      for (final Map.Entry<String, ByteIterator> anEntry : theEntries) {
+        final byte[] aColumnName = anEntry.getKey().getBytes();
+
+        anOutputStream.write(toBytes(aColumnName.length));
+        anOutputStream.write(aColumnName);
+
+        final byte[] aColumnValue = anEntry.getValue().toArray();
+
+        anOutputStream.write(toBytes(aColumnValue.length));
+        anOutputStream.write(aColumnValue);
+      }
+      return anOutputStream.toByteArray();
+    } catch (IOException e) {
+      throw new IllegalStateException(e);
+    } finally {
+      close(anOutputStream);
+    }
+  }
+
+  /**
+   * Deserializes an input byte array, transforming it into a list of (String, ByteIterator) pairs (i.e. a Map).
+   *
+   * @param aValue    A byte array containing the table to deserialize.
+   * @param theResult The Map in which to store the deserialized table.
+   */
+  private static void deserializeTable(final byte[] aValue, final Map<String, ByteIterator> theResult) {
+    final ByteArrayInputStream anInputStream = new ByteArrayInputStream(aValue);
+    byte[] aSizeBuffer = new byte[4];
+
+    try {
+      while (anInputStream.available() > 0) {
+        anInputStream.read(aSizeBuffer);
+        final int aColumnNameLength = fromBytes(aSizeBuffer);
+
+        final byte[] aColumnNameBuffer = new byte[aColumnNameLength];
+        anInputStream.read(aColumnNameBuffer);
+
+        anInputStream.read(aSizeBuffer);
+        final int aColumnValueLength = fromBytes(aSizeBuffer);
+
+        final byte[] aColumnValue = new byte[aColumnValueLength];
+        anInputStream.read(aColumnValue);
+
+        theResult.put(new String(aColumnNameBuffer), new ByteArrayByteIterator(aColumnValue));
+      }
+    } catch (Exception e) {
+      throw new IllegalStateException(e);
+    } finally {
+      close(anInputStream);
+    }
+  }
+
+  /**
+   * Obtains a Long number from a key string, stripping any leading alphabetic prefix (e.g. "user12" becomes 12).
+   * This will be the key used by Riak for all the transactions.
+   *
+   * @param key The key to convert from String to Long.
+   * @return A Long number parsed from the key String.
+   */
+  static Long getKeyAsLong(String key) {
+    String keyString = key.replaceFirst("[a-zA-Z]*", "");
+
+    return Long.parseLong(keyString);
+  }
+
+  /**
+   * Function that retrieves all the fields searched within a read or scan operation and puts them in the result
+   * HashMap.
+   *
+   * @param fields        The list of fields to read, or null for all of them.
+   * @param response      The FetchValue response from which to extract the requested field/value pairs.
+   * @param resultHashMap The HashMap to return as result.
+   */
+  static void createResultHashMap(Set<String> fields, FetchValue.Response response,
+                                  HashMap<String, ByteIterator> resultHashMap) {
+    // If everything went fine, then a result must be given. Such an object is a hash table containing the (field,
+    // value) pairs based on the requested fields. Note that in a read operation, ONLY ONE OBJECT IS RETRIEVED!
+    // The following line retrieves the previously serialized table which was stored with an insert transaction.
+    byte[] responseFieldsAndValues = response.getValues().get(0).getValue().getValue();
+
+    // Deserialize the stored response table.
+    HashMap<String, ByteIterator> deserializedTable = new HashMap<>();
+    deserializeTable(responseFieldsAndValues, deserializedTable);
+
+    // If only specific fields are requested, then only these should be put in the result object!
+    if (fields != null) {
+      // Populate the HashMap to provide as result.
+      for (Object field : fields.toArray()) {
+        // Comparison between a requested field and the ones retrieved. If the get() operation DOES NOT return a
+        // null value, then the pair is stored in the resultHashMap.
+        ByteIterator value = deserializedTable.get(field);
+
+        if (value != null) {
+          resultHashMap.put((String) field, value);
+        }
+      }
+    } else {
+      // If, instead, no field is specified, then all those retrieved must be provided as result.
+      resultHashMap.putAll(deserializedTable);
+    }
+  }
+}
diff --git a/riak/src/main/java/com/yahoo/ycsb/db/riak/package-info.java b/riak/src/main/java/com/yahoo/ycsb/db/riak/package-info.java
new file mode 100644
index 0000000000000000000000000000000000000000..32d163fdcf7cc0d3b7134e382caf673d593e54b2
--- /dev/null
+++ b/riak/src/main/java/com/yahoo/ycsb/db/riak/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Copyright (c) 2016 YCSB contributors All rights reserved.
+ * Copyright 2014 Basho Technologies, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+
+/**
+ * The YCSB binding for <a href="http://basho.com/products/riak-kv/">Riak KV</a> 2.x.y.
+ *
+ */
+package com.yahoo.ycsb.db.riak;
\ No newline at end of file
diff --git a/riak/src/main/resources/riak.properties b/riak/src/main/resources/riak.properties
new file mode 100644
index 0000000000000000000000000000000000000000..46c598fa24a0541afdcca106a7975251d69a3a07
--- /dev/null
+++ b/riak/src/main/resources/riak.properties
@@ -0,0 +1,61 @@
+##
+# Copyright (c) 2016 YCSB contributors All rights reserved.
+# Copyright 2014 Basho Technologies, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you
+# may not use this file except in compliance with the License. You
+# may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License. See accompanying
+# LICENSE file.
+#
+
+# RiakKVClient - Default Properties
+# Note: Change the properties below to set the values to use for your test. You can set them either here or from the
+# command line. Note that the latter choice overrides these settings.
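+# Example of a command-line override: -p riak.port=10017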
+
+# riak.hosts - string list, comma separated list of IPs or FQDNs.
+# EX: 127.0.0.1,127.0.0.2,127.0.0.3 or riak1.mydomain.com,riak2.mydomain.com,riak3.mydomain.com
+riak.hosts=127.0.0.1
+
+# riak.port - int, the port on which every node is listening. It must match the one specified in the riak.conf file
+# at the line "listener.protobuf.internal".
+riak.port=8087
+
+# riak.bucket_type - string, must match the value of the bucket-type created during setup. See the README for more information.
+riak.bucket_type=ycsb
+
+# riak.r_val - int, the R value represents the number of Riak nodes that must return results for a read before the read
+# is considered successful.
+riak.r_val=2
+
+# riak.w_val - int, the W value represents the number of Riak nodes that must report success before an update is
+# considered complete.
+riak.w_val=2
+
+# riak.read_retry_count - int, number of times the client will try to read a key from Riak.
+riak.read_retry_count=5
+
+# riak.wait_time_before_retry - int, time (in milliseconds) the client waits before attempting to perform another
+# read if the previous one failed.
+riak.wait_time_before_retry=200
+
+# riak.transaction_time_limit - int, time (in seconds) the client waits before aborting the current transaction.
+riak.transaction_time_limit=10
+
+# riak.strong_consistency - boolean, indicates whether to use strong consistency (true) or eventual consistency (false).
+riak.strong_consistency=true
+
+# riak.strong_consistent_scans_bucket_type - string, indicates the bucket-type to use to allow scan transactions
+# when using strong consistency mode. Example: fakeBucketType.
+riak.strong_consistent_scans_bucket_type=
+
+# riak.debug - boolean, enables debug mode. This displays all the properties (specified or defaults) when a benchmark
+# is started.
+riak.debug=false
diff --git a/riak/src/test/java/com/yahoo/ycsb/db/riak/RiakKVClientTest.java b/riak/src/test/java/com/yahoo/ycsb/db/riak/RiakKVClientTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..a571fe43242601a653227f0e1fbc255780e626ef
--- /dev/null
+++ b/riak/src/test/java/com/yahoo/ycsb/db/riak/RiakKVClientTest.java
@@ -0,0 +1,264 @@
+/**
+ * Copyright (c) 2016 YCSB contributors All rights reserved.
+ * Copyright 2014 Basho Technologies, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+
+package com.yahoo.ycsb.db.riak;
+
+import java.util.*;
+
+import com.yahoo.ycsb.ByteIterator;
+import com.yahoo.ycsb.Status;
+import com.yahoo.ycsb.StringByteIterator;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assume.assumeNoException;
+import static org.junit.Assume.assumeThat;
+
+/**
+ * Integration tests for the Riak KV client.
+ */
+public class RiakKVClientTest {
+  private static RiakKVClient riakClient;
+
+  private static final String bucket = "testBucket";
+  private static final String keyPrefix = "testKey";
+  private static final int recordsToInsert = 20;
+  private static final int recordsToScan = 7;
+  private static final String firstField = "Key number";
+  private static final String secondField = "Key number doubled";
+  private static final String thirdField = "Key number square";
+
+  private static boolean testStarted = false;
+
+  /**
+   * Creates a cluster for testing purposes.
+   */
+  @BeforeClass
+  public static void setUpClass() throws Exception {
+    riakClient = new RiakKVClient();
+    riakClient.init();
+
+    // Set the test bucket environment with the appropriate parameters.
+    try {
+      riakClient.setTestEnvironment(bucket);
+    } catch (Exception e) {
+      assumeNoException("Unable to configure Riak KV for test, aborting.", e);
+    }
+
+    // Just add some records to work on...
+    for (int i = 0; i < recordsToInsert; i++) {
+      // Abort the entire test whenever the dataset population operation fails.
+      assumeThat("Riak KV is NOT RUNNING, aborting test.",
+          riakClient.insert(bucket, keyPrefix + String.valueOf(i), StringByteIterator.getByteIteratorMap(
+              createExpectedHashMap(i))),
+              is(Status.OK));
+    }
+
+    // Flag checked to determine whether the test has actually started.
+    testStarted = true;
+  }
+
+  /**
+   * Shuts down the cluster created.
+   */
+  @AfterClass
+  public static void tearDownClass() throws Exception {
+    // Delete all added keys before cleanup ONLY IF TEST ACTUALLY STARTED.
+    if (testStarted) {
+      for (int i = 0; i <= recordsToInsert; i++) {
+        delete(keyPrefix + Integer.toString(i));
+      }
+    }
+
+    riakClient.cleanup();
+  }
+
+  /**
+   * Test method for read transaction. It is designed to read two of the three fields stored for each key, to also test
+   * if the createResultHashMap() function implemented in RiakUtils.java works as expected.
+   */
+  @Test
+  public void testRead() {
+    // Choose a random key to read, among the available ones.
+    int readKeyNumber = new Random().nextInt(recordsToInsert);
+
+    // Prepare two fields to read.
+    Set<String> fields = new HashSet<>();
+    fields.add(firstField);
+    fields.add(thirdField);
+
+    // Prepare an expected result.
+    HashMap<String, String> expectedValue = new HashMap<>();
+    expectedValue.put(firstField, Integer.toString(readKeyNumber));
+    expectedValue.put(thirdField, Integer.toString(readKeyNumber * readKeyNumber));
+
+    // Define a HashMap to store the actual result.
+    HashMap<String, ByteIterator> readValue = new HashMap<>();
+
+    // If a read transaction has been properly done, then one has to receive a Status.OK return from the read()
+    // function. Moreover, the actual returned result MUST match the expected one.
+    assertEquals("Read transaction FAILED.",
+        Status.OK,
+        riakClient.read(bucket, keyPrefix + Integer.toString(readKeyNumber), fields, readValue));
+
+    assertEquals("Read test FAILED. Actual read transaction value is NOT MATCHING the expected one.",
+        expectedValue.toString(),
+        readValue.toString());
+  }
+
+  /**
+   * Test method for scan transaction. A scan transaction has to be considered successfully completed only if all the
+   * requested values are read (i.e. scan transaction returns with Status.OK). Moreover, one has to check if the
+   * obtained results match the expected ones.
+   */
+  @Test
+  public void testScan() {
+    // Choose, among the available ones, a random key as starting point for the scan transaction.
+    int startScanKeyNumber = new Random().nextInt(recordsToInsert - recordsToScan);
+
+    // Prepare a HashMap vector to store the scan transaction results.
+    Vector<HashMap<String, ByteIterator>> scannedValues = new Vector<>();
+
+    // Check whether the scan transaction is correctly performed or not.
+    assertEquals("Scan transaction FAILED.",
+        Status.OK,
+        riakClient.scan(bucket, keyPrefix + Integer.toString(startScanKeyNumber), recordsToScan, null,
+            scannedValues));
+
+    // After the scan transaction completes, compare the obtained results with the expected ones.
+    for (int i = 0; i < recordsToScan; i++) {
+      assertEquals("Scan test FAILED: the current scanned key is NOT MATCHING the expected one.",
+          createExpectedHashMap(startScanKeyNumber + i).toString(),
+          scannedValues.get(i).toString());
+    }
+  }
+
+  /**
+   * Test method for update transaction. The test updates a key, verifies that the values just provided are read
+   * back, and finally restores the original values.
+   */
+  @Test
+  public void testUpdate() {
+    // Choose a random key to read, among the available ones.
+    int updateKeyNumber = new Random().nextInt(recordsToInsert);
+
+    // Define a HashMap to save the previously stored values so that they can be restored afterwards.
+    HashMap<String, ByteIterator> readValueBeforeUpdate = new HashMap<>();
+    riakClient.read(bucket, keyPrefix + Integer.toString(updateKeyNumber), null, readValueBeforeUpdate);
+
+    // Prepare an update HashMap to store.
+    HashMap<String, String> updateValue = new HashMap<>();
+    updateValue.put(firstField, "UPDATED");
+    updateValue.put(secondField, "UPDATED");
+    updateValue.put(thirdField, "UPDATED");
+
+    // First of all, perform the update and check whether it's failed or not.
+    assertEquals("Update transaction FAILED.",
+        Status.OK,
+        riakClient.update(bucket, keyPrefix + Integer.toString(updateKeyNumber), StringByteIterator
+            .getByteIteratorMap(updateValue)));
+
+    // Then, read the key again and...
+    HashMap<String, ByteIterator> readValueAfterUpdate = new HashMap<>();
+    assertEquals("Update test FAILED. Unable to read key value.",
+        Status.OK,
+        riakClient.read(bucket, keyPrefix + Integer.toString(updateKeyNumber), null, readValueAfterUpdate));
+
+    // ...compare the result with the new one!
+    assertEquals("Update transaction NOT EXECUTED PROPERLY. Values DID NOT CHANGE.",
+        updateValue.toString(),
+        readValueAfterUpdate.toString());
+
+    // Finally, restore the previously read key.
+    assertEquals("Update test FAILED. Unable to restore previous key value.",
+        Status.OK,
+        riakClient.update(bucket, keyPrefix + Integer.toString(updateKeyNumber), readValueBeforeUpdate));
+  }
+
+  /**
+   * Test method for insert transaction. It is designed to insert a key just after the last key inserted in the setUp()
+   * phase.
+   */
+  @Test
+  public void testInsert() {
+    // Define a HashMap to insert and another one for the comparison operation.
+    HashMap<String, String> insertValue = createExpectedHashMap(recordsToInsert);
+    HashMap<String, ByteIterator> readValue = new HashMap<>();
+
+    // Check whether the insertion transaction was performed or not.
+    assertEquals("Insert transaction FAILED.",
+        Status.OK,
+        riakClient.insert(bucket, keyPrefix + Integer.toString(recordsToInsert), StringByteIterator.
+            getByteIteratorMap(insertValue)));
+
+    // Finally, compare the insertion performed with the one expected by reading the key.
+    assertEquals("Insert test FAILED. Unable to read inserted value.",
+        Status.OK,
+        riakClient.read(bucket, keyPrefix + Integer.toString(recordsToInsert), null, readValue));
+    assertEquals("Insert test FAILED. Actual read transaction value is NOT MATCHING the inserted one.",
+        insertValue.toString(),
+        readValue.toString());
+  }
+
+  /**
+   * Test method for delete transaction. The test deletes a key, then performs a read that should give a
+   * Status.NOT_FOUND response. Finally, it restores the previously deleted key.
+   */
+  @Test
+  public void testDelete() {
+    // Choose a random key to delete, among the available ones.
+    int deleteKeyNumber = new Random().nextInt(recordsToInsert);
+
+    // Define a HashMap to save the previously stored values so that the key can be restored afterwards.
+    HashMap<String, ByteIterator> readValueBeforeDelete = new HashMap<>();
+    riakClient.read(bucket, keyPrefix + Integer.toString(deleteKeyNumber), null, readValueBeforeDelete);
+
+    // First of all, delete the key.
+    assertEquals("Delete transaction FAILED.",
+        Status.OK,
+        delete(keyPrefix + Integer.toString(deleteKeyNumber)));
+
+    // Then, check if the deletion was actually achieved.
+    assertEquals("Delete test FAILED. Key NOT deleted.",
+        Status.NOT_FOUND,
+        riakClient.read(bucket, keyPrefix + Integer.toString(deleteKeyNumber), null, null));
+
+    // Finally, restore the previously deleted key.
+    assertEquals("Delete test FAILED. Unable to restore previous key value.",
+        Status.OK,
+        riakClient.insert(bucket, keyPrefix + Integer.toString(deleteKeyNumber), readValueBeforeDelete));
+  }
+
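+  // Convenience wrapper around the client's delete(), always targeting the test bucket.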
+  private static Status delete(String key) {
+    return riakClient.delete(bucket, key);
+  }
+
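+  // Builds the reference record for a given key number: the three fields hold the number, its double and its square.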
+  private static HashMap<String, String> createExpectedHashMap(int value) {
+    HashMap<String, String> values = new HashMap<>();
+
+    values.put(firstField, Integer.toString(value));
+    values.put(secondField, Integer.toString(2 * value));
+    values.put(thirdField, Integer.toString(value * value));
+
+    return values;
+  }
+}
diff --git a/s3/pom.xml b/s3/pom.xml
index d5726a4621ffd67093a47c1498a0d6ac47758d32..f224641771ff4d9b443fb4cea273221840553eba 100644
--- a/s3/pom.xml
+++ b/s3/pom.xml
@@ -19,7 +19,7 @@ LICENSE file.
     <parent>
         <groupId>com.yahoo.ycsb</groupId>
         <artifactId>binding-parent</artifactId>
-        <version>0.9.0-SNAPSHOT</version>
+        <version>0.10.0-SNAPSHOT</version>
         <relativePath>../binding-parent</relativePath>
     </parent>
   
diff --git a/solr/pom.xml b/solr/pom.xml
index 8253ea02dbf57303494a58cf09010da762cae15e..2959bf9c3d4e8fa8a121980b32755e58c97b19f1 100644
--- a/solr/pom.xml
+++ b/solr/pom.xml
@@ -23,7 +23,7 @@ LICENSE file.
 	<parent>
 		<groupId>com.yahoo.ycsb</groupId>
 		<artifactId>binding-parent</artifactId>
-		<version>0.9.0-SNAPSHOT</version>
+		<version>0.10.0-SNAPSHOT</version>
 		<relativePath>../binding-parent</relativePath>
 	</parent>
 
diff --git a/tarantool/pom.xml b/tarantool/pom.xml
index acaea4ffc824e3640fe8afb08a415ac63cf2f07b..4b43ffc2d26466a1424426be7b8a7e26fd5bd352 100644
--- a/tarantool/pom.xml
+++ b/tarantool/pom.xml
@@ -22,7 +22,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent/</relativePath>
   </parent>
 
diff --git a/voldemort/pom.xml b/voldemort/pom.xml
index f891a65a9f8b1e886c7a40fa64747a5d3f363c9e..fab15f0b85075469c17b0be981f6fa7f680a2967 100644
--- a/voldemort/pom.xml
+++ b/voldemort/pom.xml
@@ -22,7 +22,7 @@ LICENSE file.
   <parent>
     <groupId>com.yahoo.ycsb</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>0.9.0-SNAPSHOT</version>
+    <version>0.10.0-SNAPSHOT</version>
     <relativePath>../binding-parent</relativePath>
    </parent>
   
diff --git a/workloads/workload_template b/workloads/workload_template
index f5e80c8899e0e66ab41ea7a2add5c4a511dcb5e5..b66d3b6ef82ba9378b9d9cf38c00881471aec6ed 100644
--- a/workloads/workload_template
+++ b/workloads/workload_template
@@ -1,4 +1,4 @@
-# Copyright (c) 2012 YCSB contributors. All rights reserved.
+# Copyright (c) 2012-2016 YCSB contributors. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you
 # may not use this file except in compliance with the License. You
@@ -133,6 +133,16 @@ measurementtype=histogram
 # a new output file will be created.
 #measurement.raw.output_file = /tmp/your_output_file_for_this_run
 
+# JVM Reporting.
+#
+# Measure JVM information over time including GC counts, max and min memory
+# used, max and min thread counts, max and min system load and others. This
+# setting must be enabled in conjunction with the "-s" flag to run the status
+# thread. Every "status.interval", the status thread will capture JVM
+# statistics and record the results. At the end of the run, max and mins will
+# be recorded.
+# measurement.trackjvm = false
+
 # The range of latencies to track in the histogram (milliseconds)
 histogram.buckets=1000
 
@@ -169,3 +179,25 @@ timeseries.granularity=1000
 #
 # the following number controls the interval between retries (in seconds):
 # core_workload_insertion_retry_interval = 3
+
+# Distributed Tracing via Apache HTrace (http://htrace.incubator.apache.org/)
+#
+# Defaults to blank / no tracing
+# Below sends to a local file, sampling at 0.1%
+#
+# htrace.sampler.classes=ProbabilitySampler
+# htrace.sampler.fraction=0.001
+# htrace.span.receiver.classes=org.apache.htrace.core.LocalFileSpanReceiver
+# htrace.local.file.span.receiver.path=/some/path/to/local/file
+#
+# To capture all spans, use the AlwaysSampler
+#
+# htrace.sampler.classes=AlwaysSampler
+#
+# To send spans to an HTraced receiver, use the below and ensure
+# your classpath contains the htrace-htraced jar (i.e. when invoking the ycsb
+# command add -cp /path/to/htrace-htraced.jar)
+#
+# htrace.span.receiver.classes=org.apache.htrace.impl.HTracedSpanReceiver
+# htrace.htraced.receiver.address=example.com:9075
+# htrace.htraced.error.log.period.ms=10000