var Emitter = require('events').EventEmitter;
var GridFSBucketReadStream = require('./download');
var GridFSBucketWriteStream = require('./upload');
var shallowClone = require('../utils').shallowClone;
var toError = require('../utils').toError;
var util = require('util');

var DEFAULT_GRIDFS_BUCKET_OPTIONS = {
  bucketName: 'fs',
  chunkSizeBytes: 255 * 1024
};

module.exports = GridFSBucket;

/**
 * Constructor for a streaming GridFS interface
 * @param {Db} db A db handle
 * @param {object} [options=null] Optional settings.
 * @param {string} [options.bucketName="fs"] The 'files' and 'chunks' collections will be prefixed with the bucket name followed by a dot.
 * @param {number} [options.chunkSizeBytes=255 * 1024] Number of bytes stored in each chunk. Defaults to 255KB
 * @param {object} [options.writeConcern=null] Optional write concern to be passed to write operations, for instance `{ w: 1 }`
 * @param {object} [options.readPreference=null] Optional read preference to be passed to read operations
 * @fires GridFSBucketWriteStream#index
 * @return {GridFSBucket}
 */

function GridFSBucket(db, options) {
  Emitter.apply(this);
  this.setMaxListeners(0);

  if (options && typeof options === 'object') {
    options = shallowClone(options);
    // Fill in any options the caller did not set with the bucket defaults
    var keys = Object.keys(DEFAULT_GRIDFS_BUCKET_OPTIONS);
    for (var i = 0; i < keys.length; ++i) {
      if (!options[keys[i]]) {
        options[keys[i]] = DEFAULT_GRIDFS_BUCKET_OPTIONS[keys[i]];
      }
    }
  } else {
    options = DEFAULT_GRIDFS_BUCKET_OPTIONS;
  }

  this.s = {
    db: db,
    options: options,
    _chunksCollection: db.collection(options.bucketName + '.chunks'),
    _filesCollection: db.collection(options.bucketName + '.files'),
    checkedIndexes: false,
    calledOpenUploadStream: false,
    promiseLibrary: db.s.promiseLibrary ||
      (typeof global.Promise == 'function' ? global.Promise : require('es6-promise').Promise)
  };
}

util.inherits(GridFSBucket, Emitter);

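// Usage sketch (not part of this module): constructing a bucket from a connected
// Db handle. The connection string, database name, and bucket name 'photos' are
// placeholders, and the connect callback shape assumes a 2.x-style driver.
//
//   var MongoClient = require('mongodb').MongoClient;
//   var GridFSBucket = require('mongodb').GridFSBucket;
//
//   MongoClient.connect('mongodb://localhost:27017/test', function(error, db) {
//     if (error) throw error;
//     var bucket = new GridFSBucket(db, { bucketName: 'photos' });
//     // bucket.openUploadStream(...), bucket.find(...), etc.
//   });
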
/**
 * When the first call to openUploadStream is made, the upload stream will
 * check to see if it needs to create the proper indexes on the chunks and
 * files collections. This event is fired either when 1) it determines that
 * no index creation is necessary, or 2) when it successfully creates the
 * necessary indexes.
 *
 * @event GridFSBucket#index
 */

/**
 * Returns a writable stream (GridFSBucketWriteStream) for writing
 * buffers to GridFS. The stream's 'id' property contains the resulting
 * file's id.
 * @param {string} filename The value of the 'filename' key in the files doc
 * @param {object} [options=null] Optional settings.
 * @param {number} [options.chunkSizeBytes=null] Optional value to override this bucket's chunkSizeBytes for this file
 * @param {object} [options.metadata=null] Optional object to store in the file document's `metadata` field
 * @param {string} [options.contentType=null] Optional string to store in the file document's `contentType` field
 * @param {array} [options.aliases=null] Optional array of strings to store in the file document's `aliases` field
 * @return {GridFSBucketWriteStream}
 */

GridFSBucket.prototype.openUploadStream = function(filename, options) {
  if (options) {
    options = shallowClone(options);
  } else {
    options = {};
  }

  if (!options.chunkSizeBytes) {
    options.chunkSizeBytes = this.s.options.chunkSizeBytes;
  }

  return new GridFSBucketWriteStream(this, filename, options);
};

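// Usage sketch (not part of this module): piping a local file into GridFS. The
// `bucket` variable and the './meistersinger.mp3' path are placeholders.
//
//   var fs = require('fs');
//
//   fs.createReadStream('./meistersinger.mp3')
//     .pipe(bucket.openUploadStream('meistersinger.mp3'))
//     .on('error', function(error) { /* handle error */ })
//     .on('finish', function() { console.log('upload finished'); });
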
/**
 * Returns a writable stream (GridFSBucketWriteStream) for writing
 * buffers to GridFS for a custom file id. The stream's 'id' property contains the resulting
 * file's id.
 * @param {string|number|object} id A custom id used to identify the file
 * @param {string} filename The value of the 'filename' key in the files doc
 * @param {object} [options=null] Optional settings.
 * @param {number} [options.chunkSizeBytes=null] Optional value to override this bucket's chunkSizeBytes for this file
 * @param {object} [options.metadata=null] Optional object to store in the file document's `metadata` field
 * @param {string} [options.contentType=null] Optional string to store in the file document's `contentType` field
 * @param {array} [options.aliases=null] Optional array of strings to store in the file document's `aliases` field
 * @return {GridFSBucketWriteStream}
 */

GridFSBucket.prototype.openUploadStreamWithId = function(id, filename, options) {
  if (options) {
    options = shallowClone(options);
  } else {
    options = {};
  }

  if (!options.chunkSizeBytes) {
    options.chunkSizeBytes = this.s.options.chunkSizeBytes;
  }

  // Carry the caller-supplied id through to the write stream
  options.id = id;

  return new GridFSBucketWriteStream(this, filename, options);
};

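// Usage sketch (not part of this module): uploading under a caller-chosen id so
// the file can be referenced before the upload finishes. `bucket` and the file
// path are placeholders.
//
//   var fs = require('fs');
//   var ObjectID = require('mongodb').ObjectID;
//   var myId = new ObjectID();
//
//   fs.createReadStream('./meistersinger.mp3')
//     .pipe(bucket.openUploadStreamWithId(myId, 'meistersinger.mp3'))
//     .on('finish', function() { console.log('uploaded as', myId); });
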
/**
 * Returns a readable stream (GridFSBucketReadStream) for streaming file
 * data from GridFS.
 * @param {ObjectId} id The id of the file doc
 * @param {Object} [options=null] Optional settings.
 * @param {Number} [options.start=null] Optional 0-based offset in bytes to start streaming from
 * @param {Number} [options.end=null] Optional 0-based offset in bytes to stop streaming before
 * @return {GridFSBucketReadStream}
 */

GridFSBucket.prototype.openDownloadStream = function(id, options) {
  var filter = { _id: id };
  options = {
    start: options && options.start,
    end: options && options.end
  };
  return new GridFSBucketReadStream(this.s._chunksCollection,
    this.s._filesCollection, this.s.options.readPreference, filter, options);
};

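// Usage sketch (not part of this module): streaming a stored file to disk by its
// _id. `bucket` and `fileId` (from a prior upload or a find() query) are
// placeholders.
//
//   var fs = require('fs');
//
//   bucket.openDownloadStream(fileId)
//     .pipe(fs.createWriteStream('./output.mp3'))
//     .on('error', function(error) { /* handle error */ })
//     .on('finish', function() { console.log('download finished'); });
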
/**
 * Deletes a file with the given id
 * @param {ObjectId} id The id of the file doc
 * @param {GridFSBucket~errorCallback} [callback]
 */

GridFSBucket.prototype.delete = function(id, callback) {
  if (typeof callback === 'function') {
    return _delete(this, id, callback);
  }

  var _this = this;
  return new this.s.promiseLibrary(function(resolve, reject) {
    _delete(_this, id, function(error, res) {
      if (error) return reject(error);
      resolve(res);
    });
  });
};

function _delete(_this, id, callback) {
  _this.s._filesCollection.deleteOne({ _id: id }, function(error, res) {
    if (error) {
      return callback(error);
    }

    _this.s._chunksCollection.deleteMany({ files_id: id }, function(error) {
      if (error) {
        return callback(error);
      }

      // Delete orphaned chunks before returning FileNotFound
      if (!res.result.n) {
        var errmsg = 'FileNotFound: no file with id ' + id + ' found';
        return callback(new Error(errmsg));
      }

      callback();
    });
  });
}

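// Usage sketch (not part of this module): delete() removes the files document
// first and then its chunks; it takes a callback or returns a promise when the
// callback is omitted. `bucket` and `fileId` are placeholders.
//
//   bucket.delete(fileId, function(error) {
//     if (error) {
//       // e.g. 'FileNotFound: no file with id ... found'
//       return console.error(error);
//     }
//     console.log('file and chunks removed');
//   });
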
/**
 * Convenience wrapper around find on the files collection
 * @param {Object} filter
 * @param {Object} [options=null] Optional settings for cursor
 * @param {number} [options.batchSize=null] Optional batch size for cursor
 * @param {number} [options.limit=null] Optional limit for cursor
 * @param {number} [options.maxTimeMS=null] Optional maxTimeMS for cursor
 * @param {boolean} [options.noCursorTimeout=null] Optionally set cursor's `noCursorTimeout` flag
 * @param {number} [options.skip=null] Optional skip for cursor
 * @param {object} [options.sort=null] Optional sort for cursor
 * @return {Cursor}
 */

GridFSBucket.prototype.find = function(filter, options) {
  filter = filter || {};
  options = options || {};

  var cursor = this.s._filesCollection.find(filter);

  if (options.batchSize != null) {
    cursor.batchSize(options.batchSize);
  }
  if (options.limit != null) {
    cursor.limit(options.limit);
  }
  if (options.maxTimeMS != null) {
    cursor.maxTimeMS(options.maxTimeMS);
  }
  if (options.noCursorTimeout != null) {
    cursor.addCursorFlag('noCursorTimeout', options.noCursorTimeout);
  }
  if (options.skip != null) {
    cursor.skip(options.skip);
  }
  if (options.sort != null) {
    cursor.sort(options.sort);
  }

  return cursor;
};

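// Usage sketch (not part of this module): find() returns a cursor over the files
// collection, so the usual cursor helpers apply. `bucket` and the filter values
// are placeholders.
//
//   bucket.find({ filename: 'meistersinger.mp3' }, { sort: { uploadDate: -1 }, limit: 5 })
//     .toArray(function(error, files) {
//       if (error) return console.error(error);
//       console.log(files.map(function(f) { return f._id; }));
//     });
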
/**
 * Returns a readable stream (GridFSBucketReadStream) for streaming the
 * file with the given name from GridFS. If there are multiple files with
 * the same name, this will stream the most recent file with the given name
 * (as determined by the `uploadDate` field). You can set the `revision`
 * option to change this behavior.
 * @param {String} filename The name of the file to stream
 * @param {Object} [options=null] Optional settings
 * @param {number} [options.revision=-1] The revision number relative to the oldest file with the given filename. 0 gets you the oldest file, 1 gets you the 2nd oldest, -1 gets you the newest.
 * @param {Number} [options.start=null] Optional 0-based offset in bytes to start streaming from
 * @param {Number} [options.end=null] Optional 0-based offset in bytes to stop streaming before
 * @return {GridFSBucketReadStream}
 */

GridFSBucket.prototype.openDownloadStreamByName = function(filename, options) {
  var sort = { uploadDate: -1 };
  var skip = null;
  if (options && options.revision != null) {
    if (options.revision >= 0) {
      // Non-negative revisions count up from the oldest upload
      sort = { uploadDate: 1 };
      skip = options.revision;
    } else {
      // Negative revisions count back from the newest upload
      skip = -options.revision - 1;
    }
  }

  var filter = { filename: filename };
  options = {
    sort: sort,
    skip: skip,
    start: options && options.start,
    end: options && options.end
  };
  return new GridFSBucketReadStream(this.s._chunksCollection,
    this.s._filesCollection, this.s.options.readPreference, filter, options);
};

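// Usage sketch (not part of this module): streaming the second-oldest revision
// of a file by name (revision 1 means "2nd oldest"; the default -1 means
// "newest"). `bucket` and the filenames are placeholders.
//
//   var fs = require('fs');
//
//   bucket.openDownloadStreamByName('meistersinger.mp3', { revision: 1 })
//     .pipe(fs.createWriteStream('./second-revision.mp3'));
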
/**
 * Renames the file with the given _id to the given string
 * @param {ObjectId} id The id of the file to rename
 * @param {String} filename New name for the file
 * @param {GridFSBucket~errorCallback} [callback]
 */

GridFSBucket.prototype.rename = function(id, filename, callback) {
  if (typeof callback === 'function') {
    return _rename(this, id, filename, callback);
  }

  var _this = this;
  return new this.s.promiseLibrary(function(resolve, reject) {
    _rename(_this, id, filename, function(error, res) {
      if (error) return reject(error);
      resolve(res);
    });
  });
};

function _rename(_this, id, filename, callback) {
  var filter = { _id: id };
  var update = { $set: { filename: filename } };
  _this.s._filesCollection.updateOne(filter, update, function(error, res) {
    if (error) {
      return callback(error);
    }
    if (!res.result.n) {
      return callback(toError('File with id ' + id + ' not found'));
    }
    callback();
  });
}

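// Usage sketch (not part of this module): rename() only rewrites the `filename`
// field of the files document; the chunks are untouched. `bucket` and `fileId`
// are placeholders.
//
//   bucket.rename(fileId, 'renamed.mp3', function(error) {
//     if (error) return console.error(error);
//     console.log('renamed');
//   });
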
/**
 * Removes this bucket's files collection, followed by its chunks collection.
 * @param {GridFSBucket~errorCallback} [callback]
 */

GridFSBucket.prototype.drop = function(callback) {
  if (typeof callback === 'function') {
    return _drop(this, callback);
  }

  var _this = this;
  return new this.s.promiseLibrary(function(resolve, reject) {
    _drop(_this, function(error, res) {
      if (error) return reject(error);
      resolve(res);
    });
  });
};

function _drop(_this, callback) {
  _this.s._filesCollection.drop(function(error) {
    if (error) {
      return callback(error);
    }
    _this.s._chunksCollection.drop(function(error) {
      if (error) {
        return callback(error);
      }
      return callback();
    });
  });
}

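// Usage sketch (not part of this module): drop() removes the bucket's files
// collection and then its chunks collection; with no callback it returns a
// promise. `bucket` is a placeholder.
//
//   bucket.drop().then(function() {
//     console.log('bucket dropped');
//   }).catch(function(error) {
//     console.error(error);
//   });
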
/**
 * Callback format for all GridFSBucket methods that can accept a callback.
 * @callback GridFSBucket~errorCallback
 * @param {MongoError} error An error instance representing any errors that occurred
 */