support external url S3 #3

Open · wants to merge 4 commits into master
2 changes: 2 additions & 0 deletions .gitignore
@@ -1 +1,3 @@
*.swo

node_modules/
4 changes: 4 additions & 0 deletions README.md
@@ -17,6 +17,10 @@ npm install @outgrowio/reaction-file-collections-sa-s3
Set up your AWS S3 credentials using environment variables:

```bash

# The CDN or S3 endpoint used to download files.
CDN_ENDPOINT=http://some.endpoint.com

# The AWS region your S3 bucket is in (if using S3 on AWS)
AWS_S3_REGION=us-east-1

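For context, here is a minimal sketch (not part of this diff) of how the new option might be wired up from the consuming application; the store name "Media" and the "public-read" ACL are illustrative assumptions, not taken from this PR:

```js
import S3Store from "@outgrowio/reaction-file-collections-sa-s3";

// Hypothetical store configuration. With isPublic: true the adapter emits an
// externalUrl built from CDN_ENDPOINT for each stored file; objectACL should
// then allow public reads so that URL actually resolves.
const mediaStore = new S3Store({
  name: "Media",
  isPublic: true,
  objectACL: "public-read"
});
```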
37 changes: 29 additions & 8 deletions dist/S3Store.js
@@ -8,6 +8,7 @@ class S3Store extends _fileCollectionsSaBase.default {
collectionPrefix = "fc_sa_s3.",
fileKeyMaker,
name,
isPublic,
objectACL,
transformRead,
transformWrite } =
@@ -205,14 +206,34 @@ class S3Store extends _fileCollectionsSaBase.default {



"s3");const s3Params = {};if (process.env.AWS_S3_REGION) {(0, _debug.default)("AWS_S3_REGION:", process.env.AWS_S3_REGION);s3Params.region = process.env.AWS_S3_REGION;}if (process.env.AWS_S3_ENDPOINT) {(0, _debug.default)("AWS_S3_ENDPOINT:", process.env.AWS_S3_ENDPOINT);s3Params.endpoint = process.env.AWS_S3_ENDPOINT;s3Params.s3ForcePathStyle = true;}this.s3 = new _s.default({ apiVersion: "2006-03-01", ...s3Params });this.collectionName = `${collectionPrefix}${name}`.trim();this.objectACL = objectACL;}_fileKeyMaker(fileRecord) {const info = fileRecord.infoForCopy(this.name);(0, _debug.default)("S3Store _fileKeyMaker fileRecord info:", info);(0, _debug.default)("S3Store _fileKeyMaker fileRecord size:", fileRecord.size());const result = { _id: info.key || fileRecord._id, filename: info.name || fileRecord.name() || `${fileRecord.collectionName}-${fileRecord._id}`, size: info.size || fileRecord.size() };(0, _debug.default)("S3Store _fileKeyMaker result:", result);return result;} /**
* This retrieves objects from S3 and sends them to reaction-file-collections as a readable stream.
* Since the whole point of using S3 is hitting your content's URLs, either directly or through a CDN,
* this might not be what you're looking for. It's here to preserve reaction-file-collections' default
* behavior.
*/async _getReadStream(fileKey, { start: startPos, end: endPos } = {}) {(0, _debug.default)("S3Store _getReadStream");const opts = { Bucket: process.env.AWS_S3_BUCKET, Key: fileKey._id }; // Add range if this should be a partial read
if (typeof startPos === "number" && typeof endPos === "number") {opts.Range = `bytes=${startPos}-${endPos}`;}(0, _debug.default)("S3Store _getReadStream opts:", opts);const object = await this.s3.getObject(opts).promise();(0, _debug.default)("S3Store _getReadStream got object:", object);let totalTransferredData = 0;const stream = new _stream.Readable({ read: size => {(0, _debug.default)(`S3Store read body from ${totalTransferredData} to ${totalTransferredData + size}`);const body = object.Body.slice(totalTransferredData, totalTransferredData + size);totalTransferredData += size;(0, _debug.default)(`S3Store _getReadStream transferred ${totalTransferredData}`);stream.push(body);if (typeof endPos === "number" && totalTransferredData >= endPos || totalTransferredData >= fileKey.size) {(0, _debug.default)("S3Store _getReadStream ending stream");stream.push(null);}} });return stream;}async _getWriteStream(fileKey, options = {}) {const opts = { Bucket: process.env.AWS_S3_BUCKET, Key: `${Date.now()}-${fileKey.filename}` };(0, _debug.default)("S3Store _getWriteStream opts:", opts);(0, _debug.default)("S3Store _getWriteStream options:", options);(0, _debug.default)("S3Store _getWriteStream fileKey:", fileKey);(0, _debug.default)("S3Store _getWriteStream objectACL", this.objectACL);let uploadId = "";const uploadData = await this.s3.createMultipartUpload({ ...opts, ACL: this.objectACL }).promise();(0, _debug.default)("s3.createMultipartUpload data:", uploadData);if (uploadData.UploadId === undefined) {throw new Error("Couldn't get upload ID from S3");}uploadId = uploadData.UploadId;let partNumber = 1;let totalFileSize = 0;const parts = [];const writeStream = new _stream.Writable({ write: async (chunk, encoding, callback) => {const partData = await this.s3.uploadPart({ ...opts, Body: chunk, UploadId: uploadId, PartNumber: partNumber }).promise();parts.push({ ETag: partData.ETag, PartNumber: partNumber });(0, _debug.default)(`Part ${partNumber} successfully uploaded`, parts);partNumber += 1;totalFileSize += chunk.length;callback();} });writeStream.on("finish", async () => {(0, _debug.default)("S3Store writeStream finish");(0, _debug.default)("S3Store writeStream totalFileSize:", totalFileSize);const uploadedFile = await this.s3.completeMultipartUpload({ ...opts, UploadId: uploadId, MultipartUpload: { Parts: parts } }).promise();(0, _debug.default)("S3 multipart upload completed", uploadedFile); // Emit end and return the fileKey, size, and updated date
"s3");const s3Params = {};if (process.env.AWS_S3_REGION) {(0, _debug.default)("AWS_S3_REGION:", process.env.AWS_S3_REGION);s3Params.region = process.env.AWS_S3_REGION;}if (process.env.AWS_S3_ENDPOINT) {(0, _debug.default)("AWS_S3_ENDPOINT:", process.env.AWS_S3_ENDPOINT);s3Params.endpoint = process.env.AWS_S3_ENDPOINT;s3Params.s3ForcePathStyle = true;}if (process.env.CDN_ENDPOINT) {(0, _debug.default)("CDN_ENDPOINT:", process.env.CDN_ENDPOINT);}this.s3 = new _s.default({ apiVersion: "2006-03-01", ...s3Params });this.collectionName = `${collectionPrefix}${name}`.trim();this.objectACL = objectACL;this.isPublic = isPublic;}_fileKeyMaker(fileRecord) {const info = fileRecord.infoForCopy(this.name);(0, _debug.default)("S3Store _fileKeyMaker fileRecord info:", info);(0, _debug.default)("S3Store _fileKeyMaker fileRecord size:", fileRecord.size());const result = { _id: info.key || fileRecord._id, filename: info.name || fileRecord.name() || `${fileRecord.collectionName}-${fileRecord._id}`, size: info.size || fileRecord.size(), // I want to separate assets by shopId
shopId: fileRecord.metadata.shopId };(0, _debug.default)("S3Store _fileKeyMaker result:", result);return result;} /**
* This retrieves objects from S3 and sends them to reaction-file-collections as a readable stream.
* Since the whole point of using S3 is hitting your content's URLs, either directly or through a CDN,
* this might not be what you're looking for. It's here to preserve reaction-file-collections' default
* behavior.
*/async _getReadStream(fileKey, { start: startPos, end: endPos } = {}) {(0, _debug.default)("S3Store _getReadStream");const opts = { Bucket: process.env.AWS_S3_BUCKET, Key: fileKey._id }; // Add range if this should be a partial read
if (typeof startPos === "number" && typeof endPos === "number") {opts.Range = `bytes=${startPos}-${endPos}`;}(0, _debug.default)("S3Store _getReadStream opts:", opts);const object = await this.s3.getObject(opts).promise();(0, _debug.default)("S3Store _getReadStream got object:", object);let totalTransferredData = 0;const stream = new _stream.Readable({ read: size => {(0, _debug.default)(`S3Store read body from ${totalTransferredData} to ${totalTransferredData + size}`);const body = object.Body.slice(totalTransferredData, totalTransferredData + size);totalTransferredData += size;(0, _debug.default)(`S3Store _getReadStream transferred ${totalTransferredData}`);stream.push(body);if (typeof endPos === "number" && totalTransferredData >= endPos || totalTransferredData >= fileKey.size) {(0, _debug.default)("S3Store _getReadStream ending stream");stream.push(null);}} });return stream;}async _getWriteStream(fileKey, options = {}) {// it's pretty usefull separate assets by shop. My only concern is that we are using shopId without opaque it.
const key = `${fileKey.shopId}/${Date.now()}-${fileKey.filename}`; // set externalUrl if the bucket is public
const externalUrl = this.isPublic ? `${process.env.CDN_ENDPOINT}/${key}` : null;const opts = { Bucket: process.env.AWS_S3_BUCKET, Key: key };(0, _debug.default)("S3Store _getWriteStream opts:", opts);(0, _debug.default)("S3Store _getWriteStream options:", options);(0, _debug.default)("S3Store _getWriteStream fileKey:", fileKey);(0, _debug.default)("S3Store _getWriteStream objectACL", this.objectACL);(0, _debug.default)("S3Store _getWriteStream externalUrl", externalUrl);let uploadId = "";const uploadData = await this.s3.createMultipartUpload({ ...opts, ACL: this.objectACL }).promise();(0, _debug.default)("s3.createMultipartUpload data:", uploadData);if (uploadData.UploadId === undefined) {throw new Error("Couldn't get upload ID from S3");}uploadId = uploadData.UploadId;let partNumber = 1;let totalFileSize = 0;const parts = [];const writeStream = new _stream.Writable({ write: async (chunk, encoding, callback) => {const partData = await this.s3.uploadPart({ ...opts, Body: chunk, UploadId: uploadId, PartNumber: partNumber }).promise();parts.push({ ETag: partData.ETag, PartNumber: partNumber });(0, _debug.default)(`Part ${partNumber} successfully uploaded`, parts);partNumber += 1;totalFileSize += chunk.length;callback();} });writeStream.on("finish", async () => {(0, _debug.default)("S3Store writeStream finish");(0, _debug.default)("S3Store writeStream totalFileSize:", totalFileSize);const uploadedFile = await this.s3.completeMultipartUpload({ ...opts, UploadId: uploadId, MultipartUpload: { Parts: parts } }).promise();(0, _debug.default)("S3 multipart upload completed", uploadedFile); // Emit end and return the fileKey, size, and updated date
writeStream.emit("stored", { // Set the generated _id so that we know it for future reads and writes.
// We store the _id as a string and only convert to ObjectID right before
// reading, writing, or deleting.
fileKey: uploadedFile.Key, storedAt: new Date(), size: totalFileSize });});return writeStream;}_removeFile(fileKey) {(0, _debug.default)("S3Store _removeFile called for fileKey", fileKey);if (!fileKey._id) return Promise.resolve();return new Promise((resolve, reject) => {this.s3.deleteObject({ Bucket: process.env.AWS_S3_BUCKET, Key: fileKey._id }, (error, result) => {if (error) {reject(error);} else {resolve(result);}});});}}exports.default = S3Store;
fileKey: uploadedFile.Key, storedAt: new Date(), size: totalFileSize, externalUrl });});return writeStream;}_removeFile(fileKey) {(0, _debug.default)("S3Store _removeFile called for fileKey", fileKey);if (!fileKey._id) return Promise.resolve();return new Promise((resolve, reject) => {this.s3.deleteObject({ Bucket: process.env.AWS_S3_BUCKET, Key: fileKey._id }, (error, result) => {if (error) {reject(error);} else {resolve(result);}});});}}exports.default = S3Store;
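With this change, the "stored" event emitted by the adapter's write stream carries the new externalUrl field (null when the bucket is not public). A hedged sketch of how a caller might consume it, assuming it already has a store instance and a fileKey produced by _fileKeyMaker:

```js
// Sketch only: "store" is an S3Store instance, "fileKey" comes from _fileKeyMaker,
// and "readable" is any readable stream with the file's contents.
async function uploadAndLog(store, fileKey, readable) {
  const writeStream = await store._getWriteStream(fileKey);

  writeStream.on("stored", ({ fileKey: storedKey, storedAt, size, externalUrl }) => {
    // externalUrl is `${CDN_ENDPOINT}/${key}` for public buckets, otherwise null.
    console.log("stored", storedKey, size, storedAt, externalUrl);
  });

  readable.pipe(writeStream);
}
```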
6 changes: 3 additions & 3 deletions package.json
@@ -41,9 +41,9 @@
"prepublishOnly": "npm run build"
},
"dependencies": {
"@reactioncommerce/file-collections-sa-base": "^0.1.1",
"aws-sdk": "^2.610.0",
"@babel/runtime": "^7.8.4",
"@reactioncommerce/file-collections-sa-base": "^0.2.0",
"aws-sdk": "^2.610.0",
"debug": "^4.1.1"
},
"devDependencies": {
@@ -58,4 +58,4 @@
"publishConfig": {
"access": "public"
}
}
}
24 changes: 21 additions & 3 deletions src/S3Store.js
@@ -8,6 +8,7 @@ export default class S3Store extends StorageAdapter {
collectionPrefix = "fc_sa_s3.",
fileKeyMaker,
name,
isPublic,
objectACL,
transformRead,
transformWrite
@@ -32,13 +33,19 @@ export default class S3Store extends StorageAdapter {
s3Params.s3ForcePathStyle = true;
}

if (process.env.CDN_ENDPOINT) {
debug("CDN_ENDPOINT:", process.env.CDN_ENDPOINT);
}


this.s3 = new S3({
apiVersion: "2006-03-01",
...s3Params
});

this.collectionName = `${collectionPrefix}${name}`.trim();
this.objectACL = objectACL;
this.isPublic = isPublic;
}

_fileKeyMaker(fileRecord) {
@@ -50,7 +57,9 @@ export default class S3Store extends StorageAdapter {
const result = {
_id: info.key || fileRecord._id,
filename: info.name || fileRecord.name() || `${fileRecord.collectionName}-${fileRecord._id}`,
size: info.size || fileRecord.size()
size: info.size || fileRecord.size(),
// I want to separate assets by shopId
shopId: fileRecord.metadata.shopId
};

debug("S3Store _fileKeyMaker result:", result);
@@ -107,15 +116,23 @@ export default class S3Store extends StorageAdapter {
}

async _getWriteStream(fileKey, options = {}) {

// It's pretty useful to separate assets by shop. My only concern is that we are using the shopId without making it opaque.
const key = `${fileKey.shopId}/${Date.now()}-${fileKey.filename}`;

// set externalUrl if the bucket is public
const externalUrl = this.isPublic ? `${process.env.CDN_ENDPOINT}/${key}` : null;

const opts = {
Bucket: process.env.AWS_S3_BUCKET,
Key: `${Date.now()}-${fileKey.filename}`
Key: key
};

debug("S3Store _getWriteStream opts:", opts);
debug("S3Store _getWriteStream options:", options);
debug("S3Store _getWriteStream fileKey:", fileKey);
debug("S3Store _getWriteStream objectACL", this.objectACL);
debug("S3Store _getWriteStream externalUrl", externalUrl);

let uploadId = "";

@@ -180,7 +197,8 @@ export default class S3Store extends StorageAdapter {
// reading, writing, or deleting.
fileKey: uploadedFile.Key,
storedAt: new Date(),
size: totalFileSize
size: totalFileSize,
externalUrl
});
});

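To summarize the key layout introduced here, a small sketch of how the object key and external URL end up being composed (the shopId and filename values are made up):

```js
// Mirrors the composition in _getWriteStream: object keys are namespaced by
// shopId, and the public URL is simply CDN_ENDPOINT prefixed onto that key.
const shopId = "abc123";      // illustrative value of fileRecord.metadata.shopId
const filename = "logo.png";  // illustrative filename from the fileKey
const key = `${shopId}/${Date.now()}-${filename}`;

// For example: "http://some.endpoint.com/abc123/1700000000000-logo.png"
const externalUrl = `${process.env.CDN_ENDPOINT}/${key}`;

console.log(key, externalUrl);
```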