Commit dc86d726 authored by Sean Busbey

[s3] whitespace cleanup.

parent f84956ae
@@ -54,26 +54,26 @@ import com.amazonaws.services.s3.model.GetObjectMetadataRequest;
/**
* S3 Storage client for YCSB framework.
*
* Properties to set:
*
* s3.accessKeyId=access key S3 aws
* s3.secretKey=secret key S3 aws
* s3.endPoint=s3.amazonaws.com
* s3.region=us-east-1
* The parameter table is the name of the Bucket where the files are uploaded.
* This must be created before starting the benchmark.
* The size of the file to upload is determined by two parameters:
* - fieldcount this is the number of fields of a record in YCSB
* - fieldlength this is the size in bytes of a single field in the record
* together these two parameters define the size of the file to upload:
* the size in bytes is given by the fieldlength multiplied by the fieldcount.
* The name of the file is determined by the parameter key.
* This key is automatically generated by YCSB.
*
*/
public class S3Client extends DB {
private static AmazonS3Client s3Client;
private static String sse;
private static SSECustomerKey ssecKey;
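For example, with YCSB's core workload defaults of fieldcount=10 and fieldlength=100, every object written to the bucket is 10 * 100 = 1000 bytes. A hypothetical property set for roughly 1 MB objects (bucket name and credentials are placeholders; values are passed to YCSB with -p flags or a -P property file) could look like:

    table=mybucket
    s3.accessKeyId=<access key>
    s3.secretKey=<secret key>
    s3.endPoint=s3.amazonaws.com
    s3.region=us-east-1
    fieldcount=10
    fieldlength=100000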
@@ -81,7 +81,7 @@ public class S3Client extends DB {
/**
* Cleanup any state for this storage.
* Called once per S3 instance.
*/
@Override
public void cleanup() throws DBException {
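The method body is collapsed at this hunk boundary. As a minimal sketch, assuming cleanup simply releases the shared AWS SDK v1 client (an assumption, not necessarily the committed code), the continuation could be:

    // Sketch only: release the HTTP connections held by the shared client
    // once benchmarking is finished.
    if (s3Client != null) {
      s3Client.shutdown();
    }
  }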
@@ -101,7 +101,7 @@ public class S3Client extends DB {
}
/**
* Delete a file from S3 Storage.
*
* @param bucket
* The name of the bucket
* @param key
@@ -207,7 +207,7 @@ public class S3Client extends DB {
} catch (Exception e){
System.err.println("The file properties doesn't exist "+e.toString());
e.printStackTrace();
}
try {
System.out.println("Inizializing the S3 connection");
s3Credentials = new BasicAWSCredentials(accessKeyId, secretKey);
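The rest of the connection setup is collapsed in this hunk. Assuming the usual AWS SDK v1 wiring (plus the com.amazonaws.regions imports), the continuation plausibly builds the shared client and applies the s3.region and s3.endPoint properties, roughly as below; region and endPoint stand for the values read from the properties, and the exact committed code may differ:

    // Sketch: construct the shared client from the credentials built above
    // and point it at the configured region and endpoint.
    ClientConfiguration clientConfig = new ClientConfiguration();
    s3Client = new AmazonS3Client(s3Credentials, clientConfig);
    s3Client.setRegion(Region.getRegion(Regions.fromName(region)));
    s3Client.setEndpoint(endPoint);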
@@ -242,42 +242,42 @@ public class S3Client extends DB {
* Create a new File in the Bucket. Any field/value pairs in the specified
* values HashMap will be written into the file with the specified record
* key.
*
* @param bucket
* The name of the bucket
* @param key
* The record key of the file to insert.
* @param values
* A HashMap of field/value pairs to insert in the file.
* Only the content of the first field is written to a byteArray,
* repeated once per field. In this way the size
* of the file to upload is determined by the fieldlength
* and fieldcount parameters.
* @return OK on success, ERROR otherwise. See the
* {@link DB} class's description for a discussion of error codes.
*/
@Override
public Status insert(String bucket, String key,
HashMap<String, ByteIterator> values) {
return writeToStorage(bucket, key, values, true, sse, ssecKey);
}
/**
* Read a file from the Bucket. Each field/value pair from the result
* will be stored in a HashMap.
*
* @param bucket
* The name of the bucket
* @param key
* The record key of the file to read.
* @param fields
* The list of fields to read, or null for all of them
* (it is null by default)
* @param result
* A HashMap of field/value pairs for the result
* @return OK on success, ERROR otherwise.
*/
@Override
public Status read(String bucket, String key, Set<String> fields,
HashMap<String, ByteIterator> result) {
return readFromStorage(bucket, key, result, ssecKey);
}
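Both insert() and read() are thin wrappers over writeToStorage() and readFromStorage(). As a rough usage sketch outside the YCSB driver (bucket name and key are placeholders; ByteIterator, RandomByteIterator and Status come from YCSB core):

    // Sketch: drive the binding directly; the bucket must already exist,
    // as the class Javadoc notes.
    void smokeTest() throws DBException {
      S3Client client = new S3Client();
      client.init();
      HashMap<String, ByteIterator> values = new HashMap<String, ByteIterator>();
      values.put("field0", new RandomByteIterator(100)); // one 100-byte field
      Status inserted = client.insert("mybucket", "user1", values);
      HashMap<String, ByteIterator> readBack = new HashMap<String, ByteIterator>();
      Status found = client.read("mybucket", "user1", null, readBack);
      client.cleanup();
    }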
@@ -285,7 +285,7 @@ public class S3Client extends DB {
* Update a file in the database. Any field/value pairs in the specified
* values HashMap will be written into the file with the specified file
* key, overwriting any existing values with the same field name.
*
* @param bucket
* The name of the bucket
* @param key
@@ -295,14 +295,14 @@ public class S3Client extends DB {
* @return OK on success, ERROR otherwise.
*/
@Override
public Status update(String bucket, String key,
HashMap<String, ByteIterator> values) {
return writeToStorage(bucket, key, values, false, sse, ssecKey);
}
/**
* Perform a range scan for a set of files in the bucket. Each
* field/value pair from the result will be stored in a HashMap.
*
* @param bucket
* The name of the bucket
* @param startkey
@@ -317,13 +317,13 @@ public class S3Client extends DB {
* @return OK on success, ERROR otherwise.
*/
@Override
public Status scan(String bucket, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
return scanFromStorage(bucket, startkey, recordcount, result, ssecKey);
}
/**
* Upload a new object to S3 or update an object on S3.
*
* @param bucket
* The name of the bucket
* @param key
@@ -333,17 +333,17 @@ public class S3Client extends DB {
* @param updateMarker
* A boolean value. If true a new object will be uploaded
* to S3. If false an existing object will be re-uploaded
*
*/
protected Status writeToStorage(String bucket, String key,
HashMap<String, ByteIterator> values, Boolean updateMarker,
String sseLocal, SSECustomerKey ssecLocal) {
int totalSize = 0;
int fieldCount = values.size(); //number of fields to concatenate
// getting the first field in the values
Object keyToSearch = values.keySet().toArray()[0];
// getting the content of just one field
byte[] sourceArray = values.get(keyToSearch).toArray();
int sizeArray = sourceArray.length; //size of each array
if (updateMarker){
totalSize = sizeArray*fieldCount;
@@ -352,18 +352,18 @@ public class S3Client extends DB {
GetObjectRequest getObjectRequest = null;
GetObjectMetadataRequest getObjectMetadataRequest = null;
if (ssecLocal != null) {
getObjectRequest = new GetObjectRequest(bucket,
key).withSSECustomerKey(ssecLocal);
getObjectMetadataRequest = new GetObjectMetadataRequest(bucket,
key).withSSECustomerKey(ssecLocal);
} else {
getObjectRequest = new GetObjectRequest(bucket, key);
getObjectMetadataRequest = new GetObjectMetadataRequest(bucket,
key);
}
S3Object object =
s3Client.getObject(getObjectRequest);
ObjectMetadata objectMetadata =
s3Client.getObjectMetadata(getObjectMetadataRequest);
int sizeOfFile = (int)objectMetadata.getContentLength();
fieldCount = sizeOfFile/sizeArray;
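On an update, fieldCount is re-derived from the stored object, so a 1000-byte object made of 100-byte fields yields fieldCount = 1000 / 100 = 10. The construction of the payload used by the next hunk is collapsed here; a plausible sketch (an assumption, not necessarily the committed code) that would produce the input and metadata referenced below is:

    // Sketch: repeat the first field's bytes fieldCount times and wrap the
    // buffer as the stream plus metadata handed to PutObjectRequest.
    byte[] destinationArray = new byte[totalSize];
    int offset = 0;
    for (int i = 0; i < fieldCount; i++) {
      System.arraycopy(sourceArray, 0, destinationArray, offset, sizeArray);
      offset += sizeArray;
    }
    InputStream input = new ByteArrayInputStream(destinationArray);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(totalSize);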
@@ -386,28 +386,28 @@ public class S3Client extends DB {
PutObjectRequest putObjectRequest = null;
if (sseLocal.equals("true")) {
metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
putObjectRequest = new PutObjectRequest(bucket, key,
input, metadata);
} else if (ssecLocal != null) {
putObjectRequest = new PutObjectRequest(bucket, key,
input, metadata).withSSECustomerKey(ssecLocal);
} else {
putObjectRequest = new PutObjectRequest(bucket, key,
input, metadata);
}
try {
PutObjectResult res =
s3Client.putObject(putObjectRequest);
if(res.getETag() == null) {
return Status.ERROR;
} else {
if (sseLocal.equals("true")) {
System.out.println("Uploaded object encryption status is " +
res.getSSEAlgorithm());
System.out.println("Uploaded object encryption status is " +
res.getSSEAlgorithm());
} else if (ssecLocal != null) {
System.out.println("Uploaded object encryption status is " +
res.getSSEAlgorithm());
System.out.println("Uploaded object encryption status is " +
res.getSSEAlgorithm());
}
}
} catch (Exception e) {
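As an aside on the two encryption paths above: sse=true selects SSE-S3 (AES-256 managed by S3), while a non-null ssecKey switches the requests to SSE-C with a customer-provided key. How the binding loads that key is not shown in this excerpt, so the following is only a sketch of how such a key could be built with the SDK (java.util.Base64 and java.security.SecureRandom assumed):

    // Sketch: build an SSE-C key for the ssecKey field; the 32 random bytes
    // are a placeholder for a real, persistently stored AES-256 key.
    byte[] rawAes256Key = new byte[32];
    new SecureRandom().nextBytes(rawAes256Key);
    SSECustomerKey customerKey =
        new SSECustomerKey(Base64.getEncoder().encodeToString(rawAes256Key));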
@@ -425,34 +425,34 @@ public class S3Client extends DB {
}
/**
* Download an object from S3.
*
* @param bucket
* The name of the bucket
* @param key
* The file key of the object to download.
* @param result
* The Hash map where data from the object are written
*
*/
protected Status readFromStorage(String bucket, String key,
HashMap<String, ByteIterator> result, SSECustomerKey ssecLocal) {
try {
GetObjectRequest getObjectRequest = null;
GetObjectMetadataRequest getObjectMetadataRequest = null;
if (ssecLocal != null) {
getObjectRequest = new GetObjectRequest(bucket,
key).withSSECustomerKey(ssecLocal);
getObjectMetadataRequest = new GetObjectMetadataRequest(bucket,
key).withSSECustomerKey(ssecLocal);
} else {
getObjectRequest = new GetObjectRequest(bucket, key);
getObjectMetadataRequest = new GetObjectMetadataRequest(bucket,
key);
}
S3Object object =
s3Client.getObject(getObjectRequest);
ObjectMetadata objectMetadata =
s3Client.getObjectMetadata(getObjectMetadataRequest);
InputStream objectData = object.getObjectContent(); //consuming the stream
// writing the stream to bytes and to results
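The draining of the stream is collapsed by this hunk. One plausible sketch (an assumption, not necessarily the committed code) reads the object fully, stores it in the result map as a single field using ByteArrayByteIterator from YCSB core, and closes the stream so the connection is released:

    // Sketch: copy the S3 object into memory and expose it to YCSB.
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    byte[] chunk = new byte[4096];
    int n;
    while ((n = objectData.read(chunk)) != -1) {
      buffer.write(chunk, 0, n);
    }
    result.put(key, new ByteArrayByteIterator(buffer.toByteArray()));
    objectData.close();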
@@ -472,7 +472,7 @@ public class S3Client extends DB {
/**
* Perform an emulation of a database scan operation on an S3 bucket.
*
* @param bucket
* The name of the bucket
* @param startkey
@@ -484,9 +484,9 @@ public class S3Client extends DB {
* @param result
* A Vector of HashMaps, where each HashMap is a set of field/value
* pairs for one file
*
*/
protected Status scanFromStorage(String bucket, String startkey,
int recordcount, Vector<HashMap<String, ByteIterator>> result,
SSECustomerKey ssecLocal) {
@@ -502,7 +502,7 @@ public class S3Client extends DB {
summaries.addAll(listing.getObjectSummaries());
}
for (S3ObjectSummary summary : summaries) {
String summaryKey = summary.getKey();
keyList.add(summaryKey);
}
// Sorting the list of files in Alphabetical order
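The opening of this listing is collapsed above the hunk. Under the SDK v1 API it plausibly issues an initial listObjects() call and then pages with isTruncated()/listNextBatchOfObjects(), ending in the alphabetical sort the comment refers to; the sketch below is an assumption, not the exact committed code:

    // Sketch: first page, pagination loop (whose body is visible above),
    // and the alphabetical sort of the collected keys.
    ObjectListing listing = s3Client.listObjects(bucket);
    List<S3ObjectSummary> summaries =
        new ArrayList<S3ObjectSummary>(listing.getObjectSummaries());
    List<String> keyList = new ArrayList<String>();
    while (listing.isTruncated()) {
      listing = s3Client.listNextBatchOfObjects(listing);
      summaries.addAll(listing.getObjectSummaries());
    }
    for (S3ObjectSummary summary : summaries) {
      keyList.add(summary.getKey());
    }
    Collections.sort(keyList);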
@@ -515,19 +515,19 @@ public class S3Client extends DB {
counter = counter + 1;
}
}
// Checking if the total number of files is bigger than the number of files to read;
// if not, using the total number of files
if (recordcount < keyList.size()) {
numberOfIteration = recordcount;
} else {
numberOfIteration = keyList.size();
}
// Reading the files starting from the startkey file till the end
// of the files or till the recordcount number
for (int i = startkeyNumber; i < numberOfIteration; i++){
HashMap<String, ByteIterator> resultTemp =
new HashMap<String, ByteIterator>();
readFromStorage(bucket, keyList.get(i), resultTemp,
ssecLocal);
result.add(resultTemp);
}