diff --git a/lib/countly-bulk.js b/lib/countly-bulk.js
index 6efed27..1799654 100644
--- a/lib/countly-bulk.js
+++ b/lib/countly-bulk.js
@@ -25,6 +25,7 @@ var https = require("https");
 var cluster = require("cluster");
 var cc = require("./countly-common");
 var BulkUser = require("./countly-bulk-user");
+var CountlyStorage = require("./countly-storage");
 
 /**
  * @lends module:lib/countly-bulk
@@ -40,7 +41,7 @@ var BulkUser = require("./countly-bulk-user");
  * @param {number} [conf.max_events=100] - maximum amount of events to send in one batch
  * @param {boolean} [conf.persist_queue=false] - persistently store queue until processed, default is false if you want to keep queue in memory and process all in one process run
  * @param {boolean} [conf.force_post=false] - force using post method for all requests
- * @param {string} [conf.storage_path="../bulk_data/"] - where SDK would store data, including id, queues, etc
+ * @param {string} [conf.storage_path] - where SDK would store data, including id, queues, etc
  * @param {string} [conf.http_options=] - function to get http options by reference and overwrite them, before running each request
  * @param {number} [conf.max_key_length=128] - maximum size of all string keys
  * @param {number} [conf.max_value_size=256] - maximum size of all values in our key-value pairs (Except "picture" field, that has a limit of 4096 chars)
@@ -73,7 +74,6 @@ function CountlyBulk(conf) {
     var maxBreadcrumbCount = 100;
     var maxStackTraceLinesPerThread = 30;
     var maxStackTraceLineLength = 200;
-    var __data = {};
     cc.debugBulk = conf.debug || false;
 
     if (!conf.app_key) {
@@ -96,7 +96,6 @@ function CountlyBulk(conf) {
     conf.max_events = conf.max_events || 100;
     conf.force_post = conf.force_post || false;
     conf.persist_queue = conf.persist_queue || false;
-    conf.storage_path = conf.storage_path || "../bulk_data/";
     conf.http_options = conf.http_options || null;
     conf.maxKeyLength = conf.max_key_length || maxKeyLength;
     conf.maxValueSize = conf.max_value_size || maxValueSize;
@@ -105,7 +104,9 @@ function CountlyBulk(conf) {
     conf.maxStackTraceLinesPerThread = conf.max_stack_trace_lines_per_thread || maxStackTraceLinesPerThread;
     conf.maxStackTraceLineLength = conf.max_stack_trace_line_length || maxStackTraceLineLength;
 
-    var mainDir = path.resolve(__dirname, conf.storage_path);
+    CountlyStorage.setBulkDataPath(conf.storage_path);
+
+    var mainDir = path.resolve(__dirname, CountlyStorage.getStoragePath());
     if (conf.persist_queue) {
         try {
             if (!fs.existsSync(mainDir)) {
@@ -157,7 +158,7 @@ function CountlyBulk(conf) {
             requestQueue.push(query);
             cc.log(cc.logLevelEnums.INFO, "CountlyBulk add_request, Adding request to the queue.");
-            storeSet("cly_req_queue", requestQueue);
+            CountlyStorage.storeSet("cly_req_queue", requestQueue);
         }
         else {
             cc.log(cc.logLevelEnums.INFO, "CountlyBulk add_request, Sending message to the parent process. Adding the raw request to the queue.");
@@ -205,7 +206,7 @@ function CountlyBulk(conf) {
                 cc.log(cc.logLevelEnums.INFO, "CountlyBulk add_bulk_request, adding the request into queue.");
                 requestQueue.push(query);
             }
-            storeSet("cly_req_queue", requestQueue);
+            CountlyStorage.storeSet("cly_req_queue", requestQueue);
         }
         else {
             cc.log(cc.logLevelEnums.INFO, "CountlyBulk add_bulk_request, Sending message to the parent process. Adding the raw request to the queue.");
@@ -260,7 +261,7 @@ function CountlyBulk(conf) {
                 eventQueue[device_id] = [];
             }
             eventQueue[device_id].push(e);
-            storeSet("cly_bulk_event", eventQueue);
+            CountlyStorage.storeSet("cly_bulk_event", eventQueue);
         }
         else {
             cc.log(cc.logLevelEnums.INFO, `CountlyBulk add_event, Sending message to the parent process. Adding event: [${event.key}].`);
@@ -358,7 +359,7 @@ function CountlyBulk(conf) {
      */
     function toBulkRequestQueue(bulkRequest) {
         bulkQueue.push(bulkRequest);
-        storeSet("cly_bulk_queue", bulkQueue);
+        CountlyStorage.storeSet("cly_bulk_queue", bulkQueue);
     }
 
     var self = this;
@@ -384,7 +385,7 @@ function CountlyBulk(conf) {
             }
             if (eventChanges) {
                 isEmpty = false;
-                storeSet("cly_bulk_event", eventQueue);
+                CountlyStorage.storeSet("cly_bulk_event", eventQueue);
             }
 
             // process request queue into bulk requests
@@ -398,7 +399,7 @@ function CountlyBulk(conf) {
                     var requests = requestQueue.splice(0, conf.bulk_size);
                     toBulkRequestQueue({ app_key: conf.app_key, requests: JSON.stringify(requests) });
                 }
-                storeSet("cly_req_queue", requestQueue);
+                CountlyStorage.storeSet("cly_req_queue", requestQueue);
             }
 
             // process bulk request queue
@@ -413,7 +414,7 @@ function CountlyBulk(conf) {
                     bulkQueue.unshift(res);
                     failTimeout = cc.getTimestamp() + conf.fail_timeout;
                 }
-                storeSet("cly_bulk_queue", bulkQueue);
+                CountlyStorage.storeSet("cly_bulk_queue", bulkQueue);
                 readyToProcess = true;
             }, "heartBeat", false);
         }
@@ -594,111 +595,6 @@ function CountlyBulk(conf) {
         }
     }
 
-    /**
-     * Read value from file
-     * @param {String} key - key for file
-     * @returns {varies} value in file
-     */
-    var readFile = function(key) {
-        var data;
-        if (conf.persist_queue) {
-            var dir = path.resolve(__dirname, `${conf.storage_path}__${key}.json`);
-
-            // try reading data file
-            try {
-                data = fs.readFileSync(dir);
-            }
-            catch (ex) {
-                // there was no file, probably new init
-                cc.log(cc.logLevelEnums.ERROR, "CountlyBulk readFile, Nothing to read. Might be first init. Error: ", ex);
-                data = null;
-            }
-
-            try {
-                // trying to parse json string
-                data = JSON.parse(data);
-            }
-            catch (ex) {
-                // problem parsing, corrupted file?
-                cc.log(cc.logLevelEnums.ERROR, "CountlyBulk readFile, Problem while parsing. Error:", ex.stack);
-                // backup corrupted file data
-                fs.writeFile(path.resolve(__dirname, `${conf.storage_path}__${key}.${cc.getTimestamp()}${Math.random()}.json`), data, () => {});
-                // start with new clean object
-                data = null;
-            }
-        }
-        return data;
-    };
-
-    var asyncWriteLock = false;
-    var asyncWriteQueue = [];
-
-    /**
-     * Write to file and process queue while in asyncWriteLock
-     * @param {String} key - key for value to store
-     * @param {varies} value - value to store
-     * @param {Function} callback - callback to call when done storing
-     */
-    var writeFile = function(key, value, callback) {
-        var ob = {};
-        ob[key] = value;
-        var dir = path.resolve(__dirname, `${conf.storage_path}__${key}.json`);
-        fs.writeFile(dir, JSON.stringify(ob), (err) => {
-            if (err) {
-                // eslint-disable-next-line no-console
-                cc.log(cc.logLevelEnums.ERROR, "CountlyBulk writeFile, Problem while writing. Error:", err);
-            }
-            if (typeof callback === "function") {
-                callback(err);
-            }
-            if (asyncWriteQueue.length) {
-                setTimeout(() => {
-                    var arr = asyncWriteQueue.shift();
-                    writeFile(arr[0], arr[1], arr[2]);
-                }, 0);
-            }
-            else {
-                asyncWriteLock = false;
-            }
-        });
-    };
-
-    /**
-     * Save value in storage
-     * @param {String} key - key for value to store
-     * @param {varies} value - value to store
-     * @param {Function} callback - callback to call when done storing
-     */
-    var storeSet = function(key, value, callback) {
-        __data[key] = value;
-        if (!asyncWriteLock) {
-            asyncWriteLock = true;
-            writeFile(key, value, callback);
-        }
-        else {
-            asyncWriteQueue.push([key, value, callback]);
-        }
-    };
-
-    /**
-     * Get value from storage
-     * @param {String} key - key of value to get
-     * @param {varies} def - default value to use if not set
-     * @returns {varies} value for the key
-     */
-    var storeGet = function(key, def) {
-        if (typeof __data[key] === "undefined") {
-            var ob = readFile(key);
-            if (!ob) {
-                __data[key] = def;
-            }
-            else {
-                __data[key] = ob[key];
-            }
-        }
-        return __data[key];
-    };
-
     // listen to current workers
     if (cluster.workers) {
         for (var id in cluster.workers) {
@@ -711,9 +607,9 @@ function CountlyBulk(conf) {
         worker.on("message", handleWorkerMessage);
     });
 
-    var requestQueue = storeGet("cly_req_queue", []);
-    var eventQueue = storeGet("cly_bulk_event", {});
-    var bulkQueue = storeGet("cly_bulk_queue", []);
+    var requestQueue = CountlyStorage.storeGet("cly_req_queue", []);
+    var eventQueue = CountlyStorage.storeGet("cly_bulk_event", {});
+    var bulkQueue = CountlyStorage.storeGet("cly_bulk_queue", []);
 }
 
 module.exports = CountlyBulk;
diff --git a/lib/countly-storage.js b/lib/countly-storage.js
new file mode 100644
index 0000000..272704f
--- /dev/null
+++ b/lib/countly-storage.js
@@ -0,0 +1,167 @@
+const fs = require('fs');
+const path = require('path');
+var cc = require("./countly-common");
+var storagePath;
+var __data = {};
+
+var setStoragePath = function (path) {
+    var defaultPath = "../data/"; // default path
+    storagePath = path || defaultPath;
+};
+
+var setBulkDataPath = function (path) {
+    var defaultPath = "../bulk_data/"; // default path
+    storagePath = path || defaultPath;
+};
+
+var getStoragePath = function () {
+    return storagePath;
+};
+
+var clearStoragePath = function () {
+    storagePath = undefined;
+};
+
+/**
+ * Read value from file
+ * @param {String} key - key for file
+ * @returns {varies} value in file
+ */
+var readFile = function (key) {
+    var dir = path.resolve(__dirname, `${getStoragePath()}__${key}.json`);
+
+    // try reading data file
+    var data;
+    try {
+        data = fs.readFileSync(dir);
+    }
+    catch (ex) {
+        // there was no file, probably new init
+        data = null;
+    }
+
+    try {
+        // trying to parse json string
+        data = JSON.parse(data);
+    }
+    catch (ex) {
+        // problem parsing, corrupted file?
+        cc.log(cc.logLevelEnums.ERROR, `readFile, Failed to parse the file with key: [${key}]. Error: [${ex}].`);
+        // backup corrupted file data
+        fs.writeFile(path.resolve(__dirname, `${getStoragePath()}__${key}.${cc.getTimestamp()}${Math.random()}.json`), data, () => { });
+        // start with new clean object
+        data = null;
+    }
+    return data;
+};
+
+/**
+ * Force store data synchronously on unrecoverable errors to preserve it for next launch
+ */
+var forceStore = function () {
+    for (var i in __data) {
+        var dir = path.resolve(__dirname, `${getStoragePath()}__${i}.json`);
+        var ob = {};
+        ob[i] = __data[i];
+        try {
+            fs.writeFileSync(dir, JSON.stringify(ob));
+        }
+        catch (ex) {
+            // tried to save what's possible
+            cc.log(cc.logLevelEnums.ERROR, `forceStore, Saving files failed. Error: [${ex}].`);
+        }
+    }
+};
+
+var asyncWriteLock = false;
+var asyncWriteQueue = [];
+
+/**
+ * Write to file and process queue while in asyncWriteLock
+ * @param {String} key - key for value to store
+ * @param {varies} value - value to store
+ * @param {Function} callback - callback to call when done storing
+ */
+var writeFile = function (key, value, callback) {
+    var ob = {};
+    ob[key] = value;
+    var dir = path.resolve(__dirname, `${getStoragePath()}__${key}.json`);
+    fs.writeFile(dir, JSON.stringify(ob), (err) => {
+        if (err) {
+            cc.log(cc.logLevelEnums.ERROR, `writeFile, Writing files failed. Error: [${err}].`);
+        }
+        if (typeof callback === "function") {
+            callback(err);
+        }
+        if (asyncWriteQueue.length) {
+            setTimeout(() => {
+                var arr = asyncWriteQueue.shift();
+                writeFile(arr[0], arr[1], arr[2]);
+            }, 0);
+        }
+        else {
+            asyncWriteLock = false;
+        }
+    });
+};
+
+/**
+ * Save value in storage
+ * @param {String} key - key for value to store
+ * @param {varies} value - value to store
+ * @param {Function} callback - callback to call when done storing
+ */
+var storeSet = function (key, value, callback) {
+    __data[key] = value;
+    if (!asyncWriteLock) {
+        asyncWriteLock = true;
+        writeFile(key, value, callback);
+    }
+    else {
+        asyncWriteQueue.push([key, value, callback]);
+    }
+};
+
+/**
+ * Get value from storage
+ * @param {String} key - key of value to get
+ * @param {varies} def - default value to use if not set
+ * @returns {varies} value for the key
+ */
+var storeGet = function (key, def) {
+    cc.log(cc.logLevelEnums.DEBUG, `storeGet, Fetching item from storage with key: [${key}].`);
+    if (typeof __data[key] === "undefined") {
+        var ob = readFile(key);
+        var obLen;
+        // check if the 'read object' is empty or not
+        try {
+            obLen = Object.keys(ob).length;
+        }
+        catch (error) {
+            // if we cannot even assess the length, set it to 0 so we can return the default value
+            obLen = 0;
+        }
+
+        // if empty or falsy set default value
+        if (!ob || obLen === 0) {
+            __data[key] = def;
+        }
+        // else set the value read file has
+        else {
+            __data[key] = ob[key];
+        }
+    }
+    return __data[key];
+};
+
+module.exports = {
+    writeFile,
+    storeGet,
+    storeSet,
+    forceStore,
+    getStoragePath,
+    setStoragePath,
+    setBulkDataPath,
+    clearStoragePath,
+    readFile,
+};
\ No newline at end of file
diff --git a/lib/countly.js b/lib/countly.js
index b217d21..c475174 100644
--- a/lib/countly.js
+++ b/lib/countly.js
@@ -28,6 +28,7 @@ var https = require("https");
 var cluster = require("cluster");
 var cc = require("./countly-common");
 var Bulk = require("./countly-bulk");
+var CountlyStorage = require("./countly-storage");
 
 var Countly = {};
 
@@ -71,7 +72,6 @@ Countly.Bulk = Bulk;
     var maxBreadcrumbCount = 100;
     var maxStackTraceLinesPerThread = 30;
     var maxStackTraceLineLength = 200;
-    var __data = {};
     var deviceIdType = null;
 
     /**
@@ -104,7 +104,7 @@ Countly.Bulk = Bulk;
     * @param {boolean} [conf.force_post=false] - force using post method for all requests
     * @param {boolean} [conf.clear_stored_device_id=false] - set it to true if you want to erase the stored device ID
     * @param {boolean} [conf.test_mode=false] - set it to true if you want to initiate test_mode
-    * @param {string} [conf.storage_path="../data/"] - where SDK would store data, including id, queues, etc
+    * @param {string} [conf.storage_path] - where SDK would store data, including id, queues, etc
     * @param {boolean} [conf.require_consent=false] - pass true if you are implementing GDPR compatible consent management. It would prevent running any functionality without proper consent
     * @param {boolean|function} [conf.remote_config=false] - Enable automatic remote config fetching, provide callback function to be notified when fetching done
     * @param {function} [conf.http_options=] - function to get http options by reference and overwrite them, before running each request
@@ -159,7 +159,6 @@ Countly.Bulk = Bulk;
         Countly.city = conf.city || Countly.city || null;
         Countly.ip_address = conf.ip_address || Countly.ip_address || null;
         Countly.force_post = conf.force_post || Countly.force_post || false;
-        Countly.storage_path = conf.storage_path || Countly.storage_path || "../data/";
         Countly.require_consent = conf.require_consent || Countly.require_consent || false;
         Countly.remote_config = conf.remote_config || Countly.remote_config || false;
         Countly.http_options = conf.http_options || Countly.http_options || null;
@@ -172,8 +171,9 @@ Countly.Bulk = Bulk;
         // Common module debug value is set to init time debug value
         cc.debug = conf.debug;
+        CountlyStorage.setStoragePath(conf.storage_path);
 
-        var dir = path.resolve(__dirname, Countly.storage_path);
+        var dir = path.resolve(__dirname, CountlyStorage.getStoragePath());
         try {
             if (!fs.existsSync(dir)) {
                 fs.mkdirSync(dir, { recursive: true });
             }
@@ -186,8 +186,8 @@ Countly.Bulk = Bulk;
         // clear stored device ID if flag is set
         if (conf.clear_stored_device_id) {
            cc.log(cc.logLevelEnums.WARNING, "init, clear_stored_device_id is true, erasing the stored ID.");
-            storeSet("cly_id", null);
-            storeSet("cly_id_type", null);
+            CountlyStorage.storeSet("cly_id", null);
+            CountlyStorage.storeSet("cly_id_type", null);
         }
 
         if (Countly.url === "") {
@@ -233,8 +233,8 @@ Countly.Bulk = Bulk;
         if (cluster.isMaster) {
             // fetch stored ID and ID type
-            var storedId = storeGet("cly_id", null);
-            var storedIdType = storeGet("cly_id_type", null);
+            var storedId = CountlyStorage.storeGet("cly_id", null);
+            var storedIdType = CountlyStorage.storeGet("cly_id_type", null);
             // if there was a stored ID
             if (storedId !== null) {
                 Countly.device_id = storedId;
@@ -263,12 +263,12 @@ Countly.Bulk = Bulk;
                 deviceIdType = cc.deviceIdTypeEnums.SDK_GENERATED;
             }
             // save the ID and ID type
-            storeSet("cly_id", Countly.device_id);
-            storeSet("cly_id_type", deviceIdType);
+            CountlyStorage.storeSet("cly_id", Countly.device_id);
+            CountlyStorage.storeSet("cly_id_type", deviceIdType);
             // create queues
-            requestQueue = storeGet("cly_queue", []);
-            eventQueue = storeGet("cly_event", []);
-            remoteConfigs = storeGet("cly_remote_configs", {});
+            requestQueue = CountlyStorage.storeGet("cly_queue", []);
+            eventQueue = CountlyStorage.storeGet("cly_event", []);
+            remoteConfigs = CountlyStorage.storeGet("cly_remote_configs", {});
             heartBeat();
             // listen to current workers
             if (cluster.workers) {
@@ -354,7 +354,7 @@ Countly.Bulk = Bulk;
         Countly.city = undefined;
         Countly.ip_address = undefined;
         Countly.force_post = undefined;
-        Countly.storage_path = undefined;
+        Countly.storage_path = CountlyStorage.clearStoragePath();
         Countly.require_consent = undefined;
         Countly.http_options = undefined;
 
@@ -649,7 +649,7 @@ Countly.Bulk = Bulk;
         if (eventQueue.length > 0) {
             toRequestQueue({ events: JSON.stringify(eventQueue) });
             eventQueue = [];
-            storeSet("cly_event", eventQueue);
+            CountlyStorage.storeSet("cly_event", eventQueue);
         }
         // end current session
         Countly.end_session();
@@ -661,8 +661,8 @@ Countly.Bulk = Bulk;
         var oldId = Countly.device_id;
         Countly.device_id = newId;
         deviceIdType = cc.deviceIdTypeEnums.DEVELOPER_SUPPLIED;
-        storeSet("cly_id", Countly.device_id);
-        storeSet("cly_id_type", deviceIdType);
+        CountlyStorage.storeSet("cly_id", Countly.device_id);
+        CountlyStorage.storeSet("cly_id_type", deviceIdType);
         if (merge) {
             if (Countly.check_any_consent()) {
                 toRequestQueue({ old_device_id: oldId });
@@ -678,7 +678,7 @@ Countly.Bulk = Bulk;
         if (Countly.remote_config) {
             remoteConfigs = {};
             if (cluster.isMaster) {
-                storeSet("cly_remote_configs", remoteConfigs);
+                CountlyStorage.storeSet("cly_remote_configs", remoteConfigs);
             }
             Countly.fetch_remote_config(Countly.remote_config);
         }
@@ -757,7 +757,7 @@ Countly.Bulk = Bulk;
             e.dow = date.getDay();
             cc.log(cc.logLevelEnums.DEBUG, "add_cly_events, Adding event: ", event);
             eventQueue.push(e);
-            storeSet("cly_event", eventQueue);
+            CountlyStorage.storeSet("cly_event", eventQueue);
         }
         else {
             process.send({ cly: { event } });
         }
@@ -1072,7 +1072,7 @@ Countly.Bulk = Bulk;
     process.on("uncaughtException", (err) => {
         recordError(err, false);
         if (cluster.isMaster) {
-            forceStore();
+            CountlyStorage.forceStore();
         }
         // eslint-disable-next-line no-console
         console.error(`${(new Date()).toUTCString()} uncaughtException:`, err.message);
@@ -1085,7 +1085,7 @@ Countly.Bulk = Bulk;
         var err = new Error(`Unhandled rejection (reason: ${reason && reason.stack ? reason.stack : reason}).`);
         recordError(err, false);
         if (cluster.isMaster) {
-            forceStore();
+            CountlyStorage.forceStore();
         }
         // eslint-disable-next-line no-console
         console.error(`${(new Date()).toUTCString()} unhandledRejection:`, err.message);
@@ -1168,7 +1168,7 @@ Countly.Bulk = Bulk;
                     remoteConfigs = configs;
                 }
                 if (cluster.isMaster) {
-                    storeSet("cly_remote_configs", remoteConfigs);
+                    CountlyStorage.storeSet("cly_remote_configs", remoteConfigs);
                     cc.log(cc.logLevelEnums.INFO, `fetch_remote_config, Fetched remote config: [${remoteConfigs}].`);
                 }
             }
@@ -1352,7 +1352,7 @@ Countly.Bulk = Bulk;
         if (cluster.isMaster) {
             cc.log(cc.logLevelEnums.INFO, "request, Adding the raw request to the queue.");
             requestQueue.push(request);
-            storeSet("cly_queue", requestQueue);
+            CountlyStorage.storeSet("cly_queue", requestQueue);
         }
         else {
             cc.log(cc.logLevelEnums.INFO, "request, Sending message to the parent process. Adding the raw request to the queue.");
@@ -1439,7 +1439,7 @@ Countly.Bulk = Bulk;
             cc.log(cc.logLevelEnums.INFO, "toRequestQueue, Adding request to the queue.");
             requestQueue.push(request);
-            storeSet("cly_queue", requestQueue);
+            CountlyStorage.storeSet("cly_queue", requestQueue);
         }
         else {
             cc.log(cc.logLevelEnums.INFO, "toRequestQueue, Sending message to the parent process. Adding request to the queue.");
@@ -1470,7 +1470,7 @@ Countly.Bulk = Bulk;
                 var events = eventQueue.splice(0, maxEventBatch);
                 toRequestQueue({ events: JSON.stringify(events) });
             }
-            storeSet("cly_event", eventQueue);
+            CountlyStorage.storeSet("cly_event", eventQueue);
         }
 
         // process request queue with event queue
@@ -1485,7 +1485,7 @@ Countly.Bulk = Bulk;
                     failTimeout = cc.getTimestamp() + failTimeoutAmount;
                     cc.log(cc.logLevelEnums.ERROR, `makeRequest, Encountered a problem while making the request: [${err}]`);
                 }
-                storeSet("cly_queue", requestQueue);
+                CountlyStorage.storeSet("cly_queue", requestQueue);
                 readyToProcess = true;
             }, "heartBeat", false);
         }
@@ -1748,138 +1748,6 @@ Countly.Bulk = Bulk;
             }
         }
     }
-
-    /**
-     * Read value from file
-     * @param {String} key - key for file
-     * @returns {varies} value in file
-     */
-    var readFile = function(key) {
-        var dir = path.resolve(__dirname, `${Countly.storage_path}__${key}.json`);
-
-        // try reading data file
-        var data;
-        try {
-            data = fs.readFileSync(dir);
-        }
-        catch (ex) {
-            // there was no file, probably new init
-            data = null;
-        }
-
-        try {
-            // trying to parse json string
-            data = JSON.parse(data);
-        }
-        catch (ex) {
-            // problem parsing, corrupted file?
-            cc.log(cc.logLevelEnums.ERROR, `readFile, Failed to parse the file with key: [${key}]. Error: [${ex}].`);
-            // backup corrupted file data
-            fs.writeFile(path.resolve(__dirname, `${Countly.storage_path}__${key}.${cc.getTimestamp()}${Math.random()}.json`), data, () => {});
-            // start with new clean object
-            data = null;
-        }
-        return data;
-    };
-
-    /**
-     * Force store data synchronously on unrecoverable errors to preserve it for next launch
-     */
-    var forceStore = function() {
-        for (var i in __data) {
-            var dir = path.resolve(__dirname, `${Countly.storage_path}__${i}.json`);
-            var ob = {};
-            ob[i] = __data[i];
-            try {
-                fs.writeFileSync(dir, JSON.stringify(ob));
-            }
-            catch (ex) {
-                // tried to save whats possible
-                cc.log(cc.logLevelEnums.ERROR, `forceStore, Saving files failed. Error: [${ex}].`);
-            }
-        }
-    };
-
-    var asyncWriteLock = false;
-    var asyncWriteQueue = [];
-
-    /**
-     * Write to file and process queue while in asyncWriteLock
-     * @param {String} key - key for value to store
-     * @param {varies} value - value to store
-     * @param {Function} callback - callback to call when done storing
-     */
-    var writeFile = function(key, value, callback) {
-        var ob = {};
-        ob[key] = value;
-        var dir = path.resolve(__dirname, `${Countly.storage_path}__${key}.json`);
-        fs.writeFile(dir, JSON.stringify(ob), (err) => {
-            if (err) {
-                cc.log(cc.logLevelEnums.ERROR, `writeFile, Writing files failed. Error: [${err}].`);
-            }
-            if (typeof callback === "function") {
-                callback(err);
-            }
-            if (asyncWriteQueue.length) {
-                setTimeout(() => {
-                    var arr = asyncWriteQueue.shift();
-                    writeFile(arr[0], arr[1], arr[2]);
-                }, 0);
-            }
-            else {
-                asyncWriteLock = false;
-            }
-        });
-    };
-
-    /**
-     * Save value in storage
-     * @param {String} key - key for value to store
-     * @param {varies} value - value to store
-     * @param {Function} callback - callback to call when done storing
-     */
-    var storeSet = function(key, value, callback) {
-        __data[key] = value;
-        if (!asyncWriteLock) {
-            asyncWriteLock = true;
-            writeFile(key, value, callback);
-        }
-        else {
-            asyncWriteQueue.push([key, value, callback]);
-        }
-    };
-
-    /**
-     * Get value from storage
-     * @param {String} key - key of value to get
-     * @param {varies} def - default value to use if not set
-     * @returns {varies} value for the key
-     */
-    var storeGet = function(key, def) {
-        cc.log(cc.logLevelEnums.DEBUG, `storeGet, Fetching item from storage with key: [${key}].`);
-        if (typeof __data[key] === "undefined") {
-            var ob = readFile(key);
-            var obLen;
-            // check if the 'read object' is empty or not
-            try {
-                obLen = Object.keys(ob).length;
-            }
-            catch (error) {
-                // if we can not even asses length set it to 0 so we can return the default value
-                obLen = 0;
-            }
-
-            // if empty or falsy set default value
-            if (!ob || obLen === 0) {
-                __data[key] = def;
-            }
-            // else set the value read file has
-            else {
-                __data[key] = ob[key];
-            }
-        }
-        return __data[key];
-    };
 }());
 
 module.exports = Countly;
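Usage sketch (illustrative, not part of the patch): a minimal example of how the extracted countly-storage module is expected to be consumed internally, assuming it is required from a sibling file under lib/; the key name and stored value below are hypothetical.

    var CountlyStorage = require("./countly-storage");

    // Pick the write location before any read/write happens; with no argument
    // the module falls back to its "../data/" default.
    CountlyStorage.setStoragePath();

    // Persist a value (written asynchronously to __example_key.json under the
    // storage path) and read it back; the second storeGet argument is the
    // default returned when nothing has been stored yet.
    CountlyStorage.storeSet("example_key", { answer: 42 });
    var stored = CountlyStorage.storeGet("example_key", null);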