
Refactored writeBuffer method on GridStore, removed nested calls and simplified code
commit 365c6f89e24d3d4da9bd7b278b9c810838974779 1 parent 62bf8cf
@christkv authored
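
In outline, the refactor replaces the old per-chunk recursion in writeBuffer with a single pass: the incoming buffer is sliced into whole chunks up front, and the chunks are then saved with a countdown counter so the callback fires exactly once, after the last save completes. A minimal sketch of that countdown pattern (illustrative standalone form, not the driver's exact code):

    // Save every prepared chunk; fire the callback once the final save lands
    var pending = chunksToWrite.length;
    chunksToWrite.forEach(function(chunk) {
      chunk.save(function(err, result) {
        pending = pending - 1;
        if(pending <= 0) callback(null, self);
      });
    });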
1  .gitignore
@@ -1,6 +1,7 @@
.DS_Store
*.swp
*.seed
+*.tmp
.project
.settings
data
1  .npmignore
@@ -3,6 +3,7 @@
.buildinfo
.mongodb
.DS_Store
+*.tmp
HISTORY
Readme.md
3  lib/mongodb/gridfs/chunk.js
@@ -72,7 +72,8 @@ var Chunk = exports.Chunk = function(file, mongoObject) {
Chunk.prototype.write = function(data, callback) {
this.data.write(data, this.internalPosition);
this.internalPosition = this.data.length();
- callback(null, this);
+ if(callback != null) return callback(null, this);
+ return this;
};
/**
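
The change above makes the callback optional: Chunk.write now returns the chunk itself when invoked without one, which is what lets the refactored writeBuffer below build its list of pending chunks inline. A hedged usage sketch (gridStore and the buffers are assumed to exist):

    var chunk = new Chunk(gridStore, {'n': 0});
    chunk.write(firstBuffer);                    // synchronous style: returns the chunk
    chunk.write(secondBuffer, function(err, c) {
      // callback style still works exactly as before
    });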
113 lib/mongodb/gridfs/gridstore.js
@@ -278,6 +278,7 @@ GridStore.prototype.writeFile = function (file, callback) {
var chunk = new Chunk(self, {n:index++});
chunk.write(data, function(err, chunk) {
chunk.save(function(err, result) {
+ self.position = self.position + data.length;
// Point to current chunk
self.currentChunk = chunk;
@@ -386,62 +387,62 @@ GridStore.prototype.write = function(data, close, callback) {
*/
var writeBuffer = function(self, buffer, close, callback) {
if(typeof close === "function") { callback = close; close = null; }
- var finalClose = (close == null) ? false : close;
+ var finalClose = (close == null) ? false : close;
if(self.mode[0] != "w") {
callback(new Error((self.referenceBy == REFERENCE_BY_ID ? self.toHexString() : self.filename) + " not opened for writing"), null);
} else {
- if((self.currentChunk.position + buffer.length) >= self.chunkSize) {
- // Data exceeds current chunk remaining free size; fill up current chunk and write the rest
- // to a new chunk (recursively)
- var previousChunkNumber = self.currentChunk.chunkNumber;
- var leftOverDataSize = self.chunkSize - self.currentChunk.position;
- var firstChunkData = buffer.slice(0, leftOverDataSize);
- var leftOverData = buffer.slice(leftOverDataSize);
- // Save out current Chunk as another variable and assign a new Chunk for overflow data
- var saveChunk = self.currentChunk;
- // Create a new chunk at once (avoid wrong writing of chunks)
- self.currentChunk = new Chunk(self, {'n': (previousChunkNumber + 1)});
- // Let's finish the current chunk and then call write again for the remaining data
- saveChunk.write(firstChunkData, function(err, chunk) {
- // Save previous chunk written Size
- self.previousChunkSize = chunk.position;
- // Save the chunk
- chunk.save(function(err, result) {
- self.position = self.position + leftOverDataSize;
-
- // Write the remaining data
- writeBuffer(self, leftOverData, function(err, gridStore) {
- if(finalClose) {
- self.close(function(err, result) {
- callback(null, gridStore);
- });
- }
- else {
- callback(null, gridStore);
- }
- });
- });
- });
- } else {
- // to a new chunk (recursively)
- var previousChunkNumber = self.currentChunk.chunkNumber;
- // Save out current Chunk as another variable and assign a new Chunk for overflow data
- var saveChunk = self.currentChunk;
- // Write buffer to chunk all at once
- saveChunk.write(buffer, function(err, chunk) {
- // Save the chunk
- self.position = self.position + buffer.length;
- if(finalClose) {
- self.close(function(err, result) {
- callback(null, self);
- });
- } else {
- callback(null, self);
- }
- });
- }
- }
+ if(self.currentChunk.position + buffer.length >= self.chunkSize) {
+ // Data exceeds the current chunk's remaining free space; fill the current chunk,
+ // then keep writing whole new chunks iteratively until less than a chunkSize of data remains
+ var previousChunkNumber = self.currentChunk.chunkNumber;
+ var leftOverDataSize = self.chunkSize - self.currentChunk.position;
+ var firstChunkData = buffer.slice(0, leftOverDataSize);
+ var leftOverData = buffer.slice(leftOverDataSize);
+ // A list of chunks to write out; Chunk.write returns the chunk itself when called without a callback
+ var chunksToWrite = [self.currentChunk.write(firstChunkData)];
+ // If we have more data left than the chunk size let's keep writing new chunks
+ while(leftOverData.length >= self.chunkSize) {
+ // Create a new chunk and write to it
+ var newChunk = new Chunk(self, {'n': (previousChunkNumber + 1)});
+ var chunkData = leftOverData.slice(0, self.chunkSize);
+ leftOverData = leftOverData.slice(self.chunkSize);
+ // Update chunk number
+ previousChunkNumber = previousChunkNumber + 1;
+ // Write data
+ newChunk.write(chunkData);
+ // Push chunk to save list
+ chunksToWrite.push(newChunk);
+ }
+
+ // Set current chunk with remaining data
+ self.currentChunk = new Chunk(self, {'n': (previousChunkNumber + 1)});
+ // If we have left over data write it
+ if(leftOverData.length > 0) self.currentChunk.write(leftOverData);
+
+ // Update the position for the gridstore
+ self.position = self.position + buffer.length;
+ // Total number of chunks to write
+ var numberOfChunksToWrite = chunksToWrite.length;
+ // Write out all the chunks and then return
+ for(var i = 0; i < chunksToWrite.length; i++) {
+ var chunk = chunksToWrite[i];
+ chunk.save(function(err, result) {
+ numberOfChunksToWrite = numberOfChunksToWrite - 1;
+
+ if(numberOfChunksToWrite <= 0) {
+ // Honor a final close request before handing back control
+ if(finalClose) return self.close(function(err, result) { callback(null, self); });
+ return callback(null, self);
+ }
+ });
+ }
+ } else {
+ // Update the position for the gridstore
+ self.position = self.position + buffer.length;
+ // We have less data than the chunk size; just write it and call back
+ self.currentChunk.write(buffer);
+ if(finalClose) return self.close(function(err, result) { callback(null, self); });
+ callback(null, self);
+ }
+ }
};
/**
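
The slicing arithmetic above is easy to verify by hand. With a 256k chunkSize, 100k already in the current chunk, and a 1 MB incoming buffer, the first slice of 156k tops up the current chunk, the while loop emits three full 256k chunks, and the trailing 100k becomes the new currentChunk. A standalone check (illustrative numbers only, not driver code):

    var chunkSize = 256 * 1024;                          // 262144
    var position = 100 * 1024;                           // bytes already in currentChunk
    var total = 1024 * 1024;                             // incoming buffer length
    var firstSlice = chunkSize - position;               // 159744 bytes fill the current chunk
    var remaining = total - firstSlice;                  // 888832 bytes left over
    var fullChunks = Math.floor(remaining / chunkSize);  // 3 whole new chunks
    var tail = remaining % chunkSize;                    // 102400 bytes for the new currentChunk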
@@ -478,14 +479,18 @@ var buildMongoObject = function(self, callback) {
chunkNumber = self.currentChunk.chunkNumber;
previousChunkSize = self.currentChunk.position;
}
// Calculate the length
- var length = self.currentChunk != null ? (chunkNumber * self.chunkSize + previousChunkSize) : 0;
+ var length = self.currentChunk != null ? (chunkNumber * self.chunkSize + previousChunkSize) : 0;
var mongoObject = {
'_id': self.fileId,
'filename': self.filename,
'contentType': self.contentType,
- 'length': length < 0 ? 0 : length,
+ 'length': self.position ? self.position : 0,
'chunkSize': self.chunkSize,
'uploadDate': self.uploadDate,
'aliases': self.aliases,
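
Because every write path now advances self.position by exactly the number of bytes written, the file length recorded at close time can be read straight off it instead of being reconstructed from chunk counts. A hedged sketch of the invariant this relies on (illustrative, not driver code):

    // After any sequence of writes, whatever the chunk boundaries:
    //   self.position === total bytes written so far
    // so the files-collection document can simply record
    var length = self.position ? self.position : 0;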
137 test/gridstore/grid_store_test.js
@@ -191,9 +191,11 @@ exports.shouldCorrectlyExecuteGridStoreList = function(test) {
// Establish connection to db
db.open(function(err, db) {
+ // Our file id
+ var fileId = new ObjectID();
// Open a file for writing
- var gridStore = new GridStore(db, "foobar2", "w");
+ var gridStore = new GridStore(db, fileId, "foobar2", "w");
gridStore.open(function(err, gridStore) {
// Write some content to the file
@@ -243,8 +245,10 @@ exports.shouldCorrectlyExecuteGridStoreList = function(test) {
test.ok(items.length >= 0);
test.ok(!found);
+ // Specify separate id
+ var fileId2 = new ObjectID();
// Write another file to GridFS
- var gridStore2 = new GridStore(db, "foobar3", "w");
+ var gridStore2 = new GridStore(db, fileId2, "foobar3", "w");
gridStore2.open(function(err, gridStore) {
// Write the content
gridStore2.write('my file', function(err, gridStore) {
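
The updated tests pass an explicit ObjectID as the file id via the four-argument constructor. A minimal standalone sketch of that calling pattern (assumes an open db connection; ObjectID is exported by the driver):

    var ObjectID = require('mongodb').ObjectID;
    var fileId = new ObjectID();
    var gridStore = new GridStore(db, fileId, "foobar2", "w");
    gridStore.open(function(err, gridStore) {
      gridStore.write("hello world!", function(err, gridStore) {
        gridStore.close(function(err, result) {
          // result._id is the fileId supplied above
        });
      });
    });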
@@ -666,6 +670,135 @@ exports.shouldCorrectlyPerformWorkingFiledReadWithChunkSizeLessThanFileSize = fu
/**
* @ignore
*/
+exports.shouldCorrectlyPerformWorkingFiledWithBigFile = function(test) {
+ // Prepare fake big file
+ var data = fs.readFileSync("./test/gridstore/test_gs_working_field_read.pdf", 'binary');
+ // Open a scratch file to write the data into
+ var fd = fs.openSync("./test_gs_working_field_read.tmp", 'w');
+ // Write the data 10 times to create a big file
+ for(var i = 0; i < 10; i++) {
+ fs.writeSync(fd, data);
+ }
+ // Close the file
+ fs.closeSync(fd);
+
+ // Create a new file
+ var gridStore = new GridStore(client, null, "w");
+
+ // Use a custom chunk size so the file spans multiple chunks
+ gridStore.chunkSize = 80960;
+
+ // Open the file
+ gridStore.open(function(err, gridStore) {
+ var file = fs.createReadStream('./test_gs_working_field_read.tmp');
+ var dataSize = 0;
+
+ // Write the binary file data to GridFS
+ file.on('data', function (chunk) {
+ dataSize += chunk.length;
+
+ gridStore.write(chunk, function(err, gridStore) {
+ if(err) {
+ test.ok(false);
+ }
+ });
+ });
+
+ file.on('close', function () {
+ // Flush the remaining data to GridFS
+ gridStore.close(function(err, result) {
+ // Read in the whole file and check that it's the same content
+ GridStore.read(client, result._id, function(err, fileData) {
+ var data = fs.readFileSync('./test_gs_working_field_read.tmp');
+ test.equal(data.toString('base64'), fileData.toString('base64'));
+ test.done();
+ });
+ });
+ });
+ });
+}
+
+/**
+ * @ignore
+ */
+exports.shouldCorrectlyPerformWorkingFiledWriteWithDifferentChunkSizes = function(test) {
+ // Prepare fake big file
+ var data = fs.readFileSync("./test/gridstore/test_gs_working_field_read.pdf", 'binary');
+ // Open a scratch file to write the data into
+ var fd = fs.openSync("./test_gs_working_field_read.tmp", 'w');
+ // Write the data 10 times to create a big file
+ for(var i = 0; i < 10; i++) {
+ fs.writeSync(fd, data);
+ }
+ // Close the file
+ fs.closeSync(fd);
+ // File Size
+ var fileSize = fs.statSync('./test_gs_working_field_read.tmp').size;
+
+ var executeTest = function(_chunkSize, _test, callback) {
+ // Create a new file
+ var gridStore = new GridStore(client, null, "w");
+
+ // Set the chunk size under test
+ gridStore.chunkSize = _chunkSize;
+
+ // Open the file
+ gridStore.open(function(err, gridStore) {
+ var file = fs.createReadStream('./test_gs_working_field_read.tmp');
+ var dataSize = 0;
+
+ // Write the binary file data to GridFS
+ file.on('data', function (chunk) {
+ dataSize += chunk.length;
+
+ gridStore.write(chunk, function(err, gridStore) {
+ if(err) {
+ test.ok(false);
+ }
+ });
+ });
+
+ file.on('close', function () {
+ // Flush the remaining data to GridFS
+ gridStore.close(function(err, result) {
+ // Read in the whole file and check that it's the same content
+ GridStore.read(client, result._id, function(err, fileData) {
+ var data = fs.readFileSync('./test_gs_working_field_read.tmp');
+ _test.equal(data.toString('base64'), fileData.toString('base64'));
+ callback(null, null);
+ });
+ });
+ });
+ });
+ }
+
+ // Execute big chunk size
+ executeTest(80960, test, function(err, result) {
+ // Execute small chunk size
+ executeTest(5000, test, function(err, result) {
+ // Execute chunksize larger than file
+ executeTest(fileSize+100, test, function(err, result) {
+ test.done();
+ });
+ });
+ });
+}
+
+/**
+ * @ignore
+ */
exports.shouldCorrectlyReadAndWriteFile = function(test) {
var gridStore = new GridStore(client, "test_gs_weird_bug", "w");
var data = fs.readFileSync("./test/gridstore/test_gs_weird_bug.png", 'binary');