diff --git a/.gitignore b/.gitignore
index b4b8eb2..6623f59 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,3 @@
 *.swo
+
+node_modules/
diff --git a/README.md b/README.md
index 125ce43..298132a 100644
--- a/README.md
+++ b/README.md
@@ -17,6 +17,10 @@ npm install @outgrowio/reaction-file-collections-sa-s3
 Set up your AWS S3 credentials using environment variables:
 
 ```bash
+
+# The CDN or S3 endpoint files are downloaded from.
+CDN_ENDPOINT=http://some.endpoint.com
+
 # The AWS region your S3 bucket is in (if using S3 on AWS)
 AWS_S3_REGION=us-east-1
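The new `CDN_ENDPOINT` variable works together with the `isPublic` constructor option introduced in `src/S3Store.js` below. As a rough usage sketch (the option names come from this diff; the surrounding collection wiring is omitted and all values are placeholders):

```js
import S3Store from "@outgrowio/reaction-file-collections-sa-s3";

// Assumes CDN_ENDPOINT, AWS_S3_REGION, and AWS_S3_BUCKET are set in the
// environment. With isPublic: true, every stored file gets an externalUrl of
// the form `${CDN_ENDPOINT}/${shopId}/${timestamp}-${filename}`.
const imagesStore = new S3Store({
  name: "images",
  isPublic: true,
  // The object ACL must allow public reads for externalUrl to be reachable.
  objectACL: "public-read"
});
```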
diff --git a/dist/S3Store.js b/dist/S3Store.js
index 0a50be4..90152df 100644
--- a/dist/S3Store.js
+++ b/dist/S3Store.js
@@ -8,6 +8,7 @@ class S3Store extends _fileCollectionsSaBase.default {
     collectionPrefix = "fc_sa_s3.",
     fileKeyMaker,
     name,
+    isPublic,
     objectACL,
     transformRead,
     transformWrite } =
@@ -205,14 +206,34 @@ class S3Store extends _fileCollectionsSaBase.default {
- "s3");const s3Params = {};if (process.env.AWS_S3_REGION) {(0, _debug.default)("AWS_S3_REGION:", process.env.AWS_S3_REGION);s3Params.region = process.env.AWS_S3_REGION;}if (process.env.AWS_S3_ENDPOINT) {(0, _debug.default)("AWS_S3_ENDPOINT:", process.env.AWS_S3_ENDPOINT);s3Params.endpoint = process.env.AWS_S3_ENDPOINT;s3Params.s3ForcePathStyle = true;}this.s3 = new _s.default({ apiVersion: "2006-03-01", ...s3Params });this.collectionName = `${collectionPrefix}${name}`.trim();this.objectACL = objectACL;}_fileKeyMaker(fileRecord) {const info = fileRecord.infoForCopy(this.name);(0, _debug.default)("S3Store _fileKeyMaker fileRecord info:", info);(0, _debug.default)("S3Store _fileKeyMaker fileRecord size:", fileRecord.size());const result = { _id: info.key || fileRecord._id, filename: info.name || fileRecord.name() || `${fileRecord.collectionName}-${fileRecord._id}`, size: info.size || fileRecord.size() };(0, _debug.default)("S3Store _fileKeyMaker result:", result);return result;} /**
- * This retrieves objects from S3 and sends them to reaction-file-collections as a readable stream.
- * The whole point of using S3 being hitting your content's URLs, either directly or through a CDN,
- * this might not be what you're looking for. It's there to preserve reaction-file-collection's default
- * behavior.
- */async _getReadStream(fileKey, { start: startPos, end: endPos } = {}) {(0, _debug.default)("S3Store _getReadStream");const opts = { Bucket: process.env.AWS_S3_BUCKET, Key: fileKey._id }; // Add range if this should be a partial read
- if (typeof startPos === "number" && typeof endPos === "number") {opts.Range = `bytes=${startPos}-${endPos}`;}(0, _debug.default)("S3Store _getReadStream opts:", opts);const object = await this.s3.getObject(opts).promise();(0, _debug.default)("S3Store _getReadStream got object:", object);let totalTransferredData = 0;const stream = new _stream.Readable({ read: size => {(0, _debug.default)(`S3Store read body from ${totalTransferredData} to ${totalTransferredData + size}`);const body = object.Body.slice(totalTransferredData, totalTransferredData + size);totalTransferredData += size;(0, _debug.default)(`S3Store _getReadStream transferred ${totalTransferredData}`);stream.push(body);if (typeof endPos === "number" && totalTransferredData >= endPos || totalTransferredData >= fileKey.size) {(0, _debug.default)("S3Store _getReadStream ending stream");stream.push(null);}} });return stream;}async _getWriteStream(fileKey, options = {}) {const opts = { Bucket: process.env.AWS_S3_BUCKET, Key: `${Date.now()}-${fileKey.filename}` };(0, _debug.default)("S3Store _getWriteStream opts:", opts);(0, _debug.default)("S3Store _getWriteStream options:", options);(0, _debug.default)("S3Store _getWriteStream fileKey:", fileKey);(0, _debug.default)("S3Store _getWriteStream objectACL", this.objectACL);let uploadId = "";const uploadData = await this.s3.createMultipartUpload({ ...opts, ACL: this.objectACL }).promise();(0, _debug.default)("s3.createMultipartUpload data:", uploadData);if (uploadData.UploadId === undefined) {throw new Error("Couldn't get upload ID from S3");}uploadId = uploadData.UploadId;let partNumber = 1;let totalFileSize = 0;const parts = [];const writeStream = new _stream.Writable({ write: async (chunk, encoding, callback) => {const partData = await this.s3.uploadPart({ ...opts, Body: chunk, UploadId: uploadId, PartNumber: partNumber }).promise();parts.push({ ETag: partData.ETag, PartNumber: partNumber });(0, _debug.default)(`Part ${partNumber} successfully uploaded`, parts);partNumber += 1;totalFileSize += chunk.length;callback();} });writeStream.on("finish", async () => {(0, _debug.default)("S3Store writeStream finish");(0, _debug.default)("S3Store writeStream totalFileSize:", totalFileSize);const uploadedFile = await this.s3.completeMultipartUpload({ ...opts, UploadId: uploadId, MultipartUpload: { Parts: parts } }).promise();(0, _debug.default)("S3 multipart upload completed", uploadedFile); // Emit end and return the fileKey, size, and updated date
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ "s3");const s3Params = {};if (process.env.AWS_S3_REGION) {(0, _debug.default)("AWS_S3_REGION:", process.env.AWS_S3_REGION);s3Params.region = process.env.AWS_S3_REGION;}if (process.env.AWS_S3_ENDPOINT) {(0, _debug.default)("AWS_S3_ENDPOINT:", process.env.AWS_S3_ENDPOINT);s3Params.endpoint = process.env.AWS_S3_ENDPOINT;s3Params.s3ForcePathStyle = true;}if (process.env.CDN_ENDPOINT) {(0, _debug.default)("CDN_ENDPOINT:", process.env.CDN_ENDPOINT);}this.s3 = new _s.default({ apiVersion: "2006-03-01", ...s3Params });this.collectionName = `${collectionPrefix}${name}`.trim();this.objectACL = objectACL;this.isPublic = isPublic;}_fileKeyMaker(fileRecord) {const info = fileRecord.infoForCopy(this.name);(0, _debug.default)("S3Store _fileKeyMaker fileRecord info:", info);(0, _debug.default)("S3Store _fileKeyMaker fileRecord size:", fileRecord.size());const result = { _id: info.key || fileRecord._id, filename: info.name || fileRecord.name() || `${fileRecord.collectionName}-${fileRecord._id}`, size: info.size || fileRecord.size(), // Separate assets by shopId
+ shopId: fileRecord.metadata.shopId };(0, _debug.default)("S3Store _fileKeyMaker result:", result);return result;} /**
+ * This retrieves objects from S3 and sends them to reaction-file-collections as a readable stream.
+ * The whole point of using S3 being hitting your content's URLs, either directly or through a CDN,
+ * this might not be what you're looking for. It's there to preserve reaction-file-collection's default
+ * behavior.
+ */async _getReadStream(fileKey, { start: startPos, end: endPos } = {}) {(0, _debug.default)("S3Store _getReadStream");const opts = { Bucket: process.env.AWS_S3_BUCKET, Key: fileKey._id }; // Add range if this should be a partial read
+ if (typeof startPos === "number" && typeof endPos === "number") {opts.Range = `bytes=${startPos}-${endPos}`;}(0, _debug.default)("S3Store _getReadStream opts:", opts);const object = await this.s3.getObject(opts).promise();(0, _debug.default)("S3Store _getReadStream got object:", object);let totalTransferredData = 0;const stream = new _stream.Readable({ read: size => {(0, _debug.default)(`S3Store read body from ${totalTransferredData} to ${totalTransferredData + size}`);const body = object.Body.slice(totalTransferredData, totalTransferredData + size);totalTransferredData += size;(0, _debug.default)(`S3Store _getReadStream transferred ${totalTransferredData}`);stream.push(body);if (typeof endPos === "number" && totalTransferredData >= endPos || totalTransferredData >= fileKey.size) {(0, _debug.default)("S3Store _getReadStream ending stream");stream.push(null);}} });return stream;}async _getWriteStream(fileKey, options = {}) {// It's pretty useful to separate assets by shop. The only concern is that we are using the shopId without making it opaque.
+ const key = `${fileKey.shopId}/${Date.now()}-${fileKey.filename}`; // Set externalUrl if the bucket is public
+ const externalUrl = this.isPublic ? `${process.env.CDN_ENDPOINT}/${key}` : null;const opts = { Bucket: process.env.AWS_S3_BUCKET, Key: key };(0, _debug.default)("S3Store _getWriteStream opts:", opts);(0, _debug.default)("S3Store _getWriteStream options:", options);(0, _debug.default)("S3Store _getWriteStream fileKey:", fileKey);(0, _debug.default)("S3Store _getWriteStream objectACL", this.objectACL);(0, _debug.default)("S3Store _getWriteStream externalUrl", externalUrl);let uploadId = "";const uploadData = await this.s3.createMultipartUpload({ ...opts, ACL: this.objectACL }).promise();(0, _debug.default)("s3.createMultipartUpload data:", uploadData);if (uploadData.UploadId === undefined) {throw new Error("Couldn't get upload ID from S3");}uploadId = uploadData.UploadId;let partNumber = 1;let totalFileSize = 0;const parts = [];const writeStream = new _stream.Writable({ write: async (chunk, encoding, callback) => {const partData = await this.s3.uploadPart({ ...opts, Body: chunk, UploadId: uploadId, PartNumber: partNumber }).promise();parts.push({ ETag: partData.ETag, PartNumber: partNumber });(0, _debug.default)(`Part ${partNumber} successfully uploaded`, parts);partNumber += 1;totalFileSize += chunk.length;callback();} });writeStream.on("finish", async () => {(0, _debug.default)("S3Store writeStream finish");(0, _debug.default)("S3Store writeStream totalFileSize:", totalFileSize);const uploadedFile = await this.s3.completeMultipartUpload({ ...opts, UploadId: uploadId, MultipartUpload: { Parts: parts } }).promise();(0, _debug.default)("S3 multipart upload completed", uploadedFile); // Emit end and return the fileKey, size, and updated date
 writeStream.emit("stored", { // Set the generated _id so that we know it for future reads and writes.
 // We store the _id as a string and only convert to ObjectID right before
 // reading, writing, or deleting.
- fileKey: uploadedFile.Key, storedAt: new Date(), size: totalFileSize });});return writeStream;}_removeFile(fileKey) {(0, _debug.default)("S3Store _removeFile called for fileKey", fileKey);if (!fileKey._id) return Promise.resolve();return new Promise((resolve, reject) => {this.s3.deleteObject({ Bucket: process.env.AWS_S3_BUCKET, Key: fileKey._id }, (error, result) => {if (error) {reject(error);} else {resolve(result);}});});}}exports.default = S3Store;
\ No newline at end of file
+ fileKey: uploadedFile.Key, storedAt: new Date(), size: totalFileSize, externalUrl });});return writeStream;}_removeFile(fileKey) {(0, _debug.default)("S3Store _removeFile called for fileKey", fileKey);if (!fileKey._id) return Promise.resolve();return new Promise((resolve, reject) => {this.s3.deleteObject({ Bucket: process.env.AWS_S3_BUCKET, Key: fileKey._id }, (error, result) => {if (error) {reject(error);} else {resolve(result);}});});}}exports.default = S3Store;
\ No newline at end of file
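The net effect of the changes above (shown in compiled form in `dist/S3Store.js` here, and in source form in `src/S3Store.js` below) is that objects are now namespaced per shop and, for public buckets, exposed through the CDN. A small illustration with made-up values:

```js
// Key scheme introduced by this diff (example values are hypothetical).
const fileKey = { shopId: "abc123", filename: "logo.png" };
const key = `${fileKey.shopId}/${Date.now()}-${fileKey.filename}`;
// => "abc123/1581340800000-logo.png"

// With isPublic: true and CDN_ENDPOINT=http://some.endpoint.com, the stored
// record's externalUrl becomes:
// "http://some.endpoint.com/abc123/1581340800000-logo.png"
```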
diff --git a/package.json b/package.json
index 8f1179a..b2ca432 100644
--- a/package.json
+++ b/package.json
@@ -41,9 +41,9 @@
     "prepublishOnly": "npm run build"
   },
   "dependencies": {
-    "@reactioncommerce/file-collections-sa-base": "^0.1.1",
-    "aws-sdk": "^2.610.0",
     "@babel/runtime": "^7.8.4",
+    "@reactioncommerce/file-collections-sa-base": "^0.2.0",
+    "aws-sdk": "^2.610.0",
     "debug": "^4.1.1"
   },
   "devDependencies": {
@@ -58,4 +58,4 @@
   "publishConfig": {
     "access": "public"
   }
-}
+}
\ No newline at end of file
diff --git a/src/S3Store.js b/src/S3Store.js
index 8f226c5..239c1b1 100644
--- a/src/S3Store.js
+++ b/src/S3Store.js
@@ -8,6 +8,7 @@ export default class S3Store extends StorageAdapter {
     collectionPrefix = "fc_sa_s3.",
     fileKeyMaker,
     name,
+    isPublic,
     objectACL,
     transformRead,
     transformWrite
@@ -32,6 +33,11 @@ export default class S3Store extends StorageAdapter {
       s3Params.s3ForcePathStyle = true;
     }
 
+    if (process.env.CDN_ENDPOINT) {
+      debug("CDN_ENDPOINT:", process.env.CDN_ENDPOINT);
+    }
+
+
     this.s3 = new S3({
       apiVersion: "2006-03-01",
       ...s3Params
@@ -39,6 +45,7 @@ export default class S3Store extends StorageAdapter {
 
     this.collectionName = `${collectionPrefix}${name}`.trim();
     this.objectACL = objectACL;
+    this.isPublic = isPublic;
   }
 
   _fileKeyMaker(fileRecord) {
@@ -50,7 +57,9 @@ export default class S3Store extends StorageAdapter {
     const result = {
       _id: info.key || fileRecord._id,
       filename: info.name || fileRecord.name() || `${fileRecord.collectionName}-${fileRecord._id}`,
-      size: info.size || fileRecord.size()
+      size: info.size || fileRecord.size(),
+      // Separate assets by shopId
+      shopId: fileRecord.metadata.shopId
     };
 
     debug("S3Store _fileKeyMaker result:", result);
@@ -107,15 +116,23 @@ export default class S3Store extends StorageAdapter {
   }
 
   async _getWriteStream(fileKey, options = {}) {
+
+    // It's pretty useful to separate assets by shop. The only concern is that we are using the shopId without making it opaque.
+    const key = `${fileKey.shopId}/${Date.now()}-${fileKey.filename}`;
+
+    // Set externalUrl if the bucket is public
+    const externalUrl = this.isPublic ? `${process.env.CDN_ENDPOINT}/${key}` : null;
+
     const opts = {
       Bucket: process.env.AWS_S3_BUCKET,
-      Key: `${Date.now()}-${fileKey.filename}`
+      Key: key
     };
 
     debug("S3Store _getWriteStream opts:", opts);
     debug("S3Store _getWriteStream options:", options);
     debug("S3Store _getWriteStream fileKey:", fileKey);
     debug("S3Store _getWriteStream objectACL", this.objectACL);
+    debug("S3Store _getWriteStream externalUrl", externalUrl);
 
     let uploadId = "";
@@ -180,7 +197,8 @@ export default class S3Store extends StorageAdapter {
       // reading, writing, or deleting.
       fileKey: uploadedFile.Key,
       storedAt: new Date(),
-      size: totalFileSize
+      size: totalFileSize,
+      externalUrl
     });
   });
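Downstream, the `externalUrl` computed in `_getWriteStream` rides along on the `"stored"` event emitted once the multipart upload completes. A hypothetical consumer (normally reaction-file-collections drives these calls itself; `sourceStream` and the direct `_getWriteStream` call are illustrative only):

```js
const writeStream = await imagesStore._getWriteStream(fileKey);

writeStream.on("stored", ({ fileKey: storedKey, size, storedAt, externalUrl }) => {
  // externalUrl is null unless the store was constructed with isPublic: true.
  console.log(`Stored ${storedKey} (${size} bytes) at ${storedAt}: ${externalUrl}`);
});

sourceStream.pipe(writeStream); // sourceStream: any readable stream of file data
```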